diff --git a/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c b/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
index b9be69d1fb02..f59571ace6bc 100644
--- a/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
+++ b/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
@@ -1,388 +1,388 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Lawrence Livermore National Security, LLC.
*/
#include <libintl.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/mount.h>
#include <sys/mntent.h>
#include <sys/stat.h>
#include <libzfs.h>
#include <libzutil.h>
#include <locale.h>
#include <getopt.h>
#include <fcntl.h>
#include <errno.h>
#define ZS_COMMENT 0x00000000 /* comment */
#define ZS_ZFSUTIL 0x00000001 /* caller is zfs(8) */
libzfs_handle_t *g_zfs;
/*
* Opportunistically convert a target string into a pool name. If the
* string does not represent a block device with a valid zfs label
* then it is passed through without modification.
*/
static void
parse_dataset(const char *target, char **dataset)
{
/*
* Prior to util-linux 2.36.2, if a file or directory in the
* current working directory was named 'dataset' then mount(8)
* would prepend the current working directory to the dataset.
* Check for it and strip the prepended path when it is added.
*/
char cwd[PATH_MAX];
if (getcwd(cwd, PATH_MAX) == NULL) {
perror("getcwd");
return;
}
int len = strlen(cwd);
if (strncmp(cwd, target, len) == 0)
target += len;
/* Assume pool/dataset is more likely */
strlcpy(*dataset, target, PATH_MAX);
int fd = open(target, O_RDONLY | O_CLOEXEC);
if (fd < 0)
return;
nvlist_t *cfg = NULL;
if (zpool_read_label(fd, &cfg, NULL) == 0) {
char *nm = NULL;
if (!nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &nm))
strlcpy(*dataset, nm, PATH_MAX);
nvlist_free(cfg);
}
if (close(fd))
perror("close");
}
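/*
 * Illustrative, self-contained sketch (not part of this diff): the cwd-prefix
 * stripping that parse_dataset() performs for pre-2.36.2 util-linux, reduced
 * to plain libc calls.  The main() driver and the sample string are
 * assumptions for demonstration only.
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static const char *
strip_cwd_prefix(const char *target, char *cwd, size_t cwdlen)
{
	if (getcwd(cwd, cwdlen) == NULL)
		return (target);
	size_t len = strlen(cwd);
	/* mount(8) may have prepended the current directory; skip past it. */
	if (strncmp(cwd, target, len) == 0)
		target += len;
	return (target);
}

int
main(void)
{
	char cwd[PATH_MAX], sample[PATH_MAX];
	/* Build "<cwd>/pool/dataset" the way an older mount(8) would. */
	if (getcwd(cwd, sizeof (cwd)) == NULL)
		return (1);
	(void) snprintf(sample, sizeof (sample), "%s/pool/dataset", cwd);
	printf("%s\n", strip_cwd_prefix(sample, cwd, sizeof (cwd)));
	return (0);
}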
/*
* Update the mtab_* code to use the libmount library when it is commonly
* available; otherwise fall back to legacy mode. The mount(8) utility will
* manage the lock file for us to prevent racing updates to /etc/mtab.
*/
static int
mtab_is_writeable(void)
{
struct stat st;
int error, fd;
error = lstat("/etc/mtab", &st);
if (error || S_ISLNK(st.st_mode))
return (0);
fd = open("/etc/mtab", O_RDWR | O_CREAT, 0644);
if (fd < 0)
return (0);
close(fd);
return (1);
}
static int
mtab_update(char *dataset, char *mntpoint, char *type, char *mntopts)
{
struct mntent mnt;
FILE *fp;
int error;
mnt.mnt_fsname = dataset;
mnt.mnt_dir = mntpoint;
mnt.mnt_type = type;
mnt.mnt_opts = mntopts ? mntopts : "";
mnt.mnt_freq = 0;
mnt.mnt_passno = 0;
fp = setmntent("/etc/mtab", "a+");
if (!fp) {
(void) fprintf(stderr, gettext(
"filesystem '%s' was mounted, but /etc/mtab "
"could not be opened due to error: %s\n"),
dataset, strerror(errno));
return (MOUNT_FILEIO);
}
error = addmntent(fp, &mnt);
if (error) {
(void) fprintf(stderr, gettext(
"filesystem '%s' was mounted, but /etc/mtab "
"could not be updated due to error: %s\n"),
dataset, strerror(errno));
return (MOUNT_FILEIO);
}
(void) endmntent(fp);
return (MOUNT_SUCCESS);
}
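/*
 * Illustrative, self-contained sketch (not part of this diff): appending a
 * mount record with the same setmntent()/addmntent()/endmntent() sequence
 * mtab_update() uses, assuming a glibc-style <mntent.h>.  The target file
 * /tmp/mtab.demo and the sample entry are assumptions for demonstration.
 */
#include <mntent.h>
#include <stdio.h>

int
main(void)
{
	struct mntent mnt = {
		.mnt_fsname = "tank/home",
		.mnt_dir = "/tank/home",
		.mnt_type = "zfs",
		.mnt_opts = "rw,zfsutil",
		.mnt_freq = 0,
		.mnt_passno = 0,
	};
	FILE *fp = setmntent("/tmp/mtab.demo", "a+");
	if (fp == NULL) {
		perror("setmntent");
		return (1);
	}
	if (addmntent(fp, &mnt) != 0)
		perror("addmntent");
	(void) endmntent(fp);
	return (0);
}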
int
main(int argc, char **argv)
{
zfs_handle_t *zhp;
char prop[ZFS_MAXPROPLEN];
uint64_t zfs_version = 0;
char mntopts[MNT_LINE_MAX] = { '\0' };
char badopt[MNT_LINE_MAX] = { '\0' };
char mtabopt[MNT_LINE_MAX] = { '\0' };
char mntpoint[PATH_MAX];
char dataset[PATH_MAX], *pdataset = dataset;
unsigned long mntflags = 0, zfsflags = 0, remount = 0;
int sloppy = 0, fake = 0, verbose = 0, nomtab = 0, zfsutil = 0;
int error, c;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
opterr = 0;
/* check options */
while ((c = getopt_long(argc, argv, "sfnvo:h?", 0, 0)) != -1) {
switch (c) {
case 's':
sloppy = 1;
break;
case 'f':
fake = 1;
break;
case 'n':
nomtab = 1;
break;
case 'v':
verbose++;
break;
case 'o':
(void) strlcpy(mntopts, optarg, sizeof (mntopts));
break;
case 'h':
case '?':
if (optopt)
(void) fprintf(stderr,
gettext("Invalid option '%c'\n"), optopt);
(void) fprintf(stderr, gettext("Usage: mount.zfs "
"[-sfnvh] [-o options] <dataset> <mountpoint>\n"));
return (MOUNT_USAGE);
}
}
argc -= optind;
argv += optind;
/* check that we only have two arguments */
if (argc != 2) {
if (argc == 0)
(void) fprintf(stderr, gettext("missing dataset "
"argument\n"));
else if (argc == 1)
(void) fprintf(stderr,
gettext("missing mountpoint argument\n"));
else
(void) fprintf(stderr, gettext("too many arguments\n"));
(void) fprintf(stderr, "usage: mount <dataset> <mountpoint>\n");
return (MOUNT_USAGE);
}
parse_dataset(argv[0], &pdataset);
/* canonicalize the mount point */
if (realpath(argv[1], mntpoint) == NULL) {
(void) fprintf(stderr, gettext("filesystem '%s' cannot be "
"mounted at '%s' due to canonicalization error: %s\n"),
dataset, argv[1], strerror(errno));
return (MOUNT_SYSERR);
}
/* validate mount options and set mntflags */
error = zfs_parse_mount_options(mntopts, &mntflags, &zfsflags, sloppy,
badopt, mtabopt);
if (error) {
switch (error) {
case ENOMEM:
(void) fprintf(stderr, gettext("filesystem '%s' "
"cannot be mounted due to a memory allocation "
"failure.\n"), dataset);
return (MOUNT_SYSERR);
case ENOENT:
(void) fprintf(stderr, gettext("filesystem '%s' "
"cannot be mounted due to invalid option "
"'%s'.\n"), dataset, badopt);
(void) fprintf(stderr, gettext("Use the '-s' option "
"to ignore the bad mount option.\n"));
return (MOUNT_USAGE);
default:
(void) fprintf(stderr, gettext("filesystem '%s' "
"cannot be mounted due to internal error %d.\n"),
dataset, error);
return (MOUNT_SOFTWARE);
}
}
if (verbose)
(void) fprintf(stdout, gettext("mount.zfs:\n"
" dataset: \"%s\"\n mountpoint: \"%s\"\n"
" mountflags: 0x%lx\n zfsflags: 0x%lx\n"
" mountopts: \"%s\"\n mtabopts: \"%s\"\n"),
dataset, mntpoint, mntflags, zfsflags, mntopts, mtabopt);
if (mntflags & MS_REMOUNT) {
nomtab = 1;
remount = 1;
}
if (zfsflags & ZS_ZFSUTIL)
zfsutil = 1;
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (MOUNT_SYSERR);
}
/* try to open the dataset to access the mount point */
if ((zhp = zfs_open(g_zfs, dataset,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT)) == NULL) {
(void) fprintf(stderr, gettext("filesystem '%s' cannot be "
"mounted, unable to open the dataset\n"), dataset);
libzfs_fini(g_zfs);
return (MOUNT_USAGE);
}
zfs_adjust_mount_options(zhp, mntpoint, mntopts, mtabopt);
/* treat all snapshots as legacy mount points */
if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT)
(void) strlcpy(prop, ZFS_MOUNTPOINT_LEGACY, ZFS_MAXPROPLEN);
else
(void) zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, prop,
sizeof (prop), NULL, NULL, 0, B_FALSE);
/*
* Fetch the max supported zfs version in case we get ENOTSUP
* back from the mount command, since we need the zfs handle
* to do so.
*/
zfs_version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
if (zfs_version == 0) {
fprintf(stderr, gettext("unable to fetch "
"ZFS version for filesystem '%s'\n"), dataset);
return (MOUNT_SYSERR);
}
zfs_close(zhp);
libzfs_fini(g_zfs);
/*
* Legacy mount points may only be mounted using 'mount', never using
* 'zfs mount'. However, since 'zfs mount' actually invokes 'mount'
* we differentiate the two cases using the 'zfsutil' mount option.
* This mount option should only be supplied by the 'zfs mount' util.
*
* The only exception to the above rule is '-o remount' which is
* always allowed for non-legacy datasets. This is done because when
* using zfs as your root file system both rc.sysinit/umountroot and
* systemd depend on 'mount -o remount <mountpoint>' to work.
*/
if (zfsutil && (strcmp(prop, ZFS_MOUNTPOINT_LEGACY) == 0)) {
(void) fprintf(stderr, gettext(
"filesystem '%s' cannot be mounted using 'zfs mount'.\n"
"Use 'zfs set mountpoint=%s' or 'mount -t zfs %s %s'.\n"
"See zfs(8) for more information.\n"),
dataset, mntpoint, dataset, mntpoint);
return (MOUNT_USAGE);
}
if (!zfsutil && !(remount || fake) &&
strcmp(prop, ZFS_MOUNTPOINT_LEGACY)) {
(void) fprintf(stderr, gettext(
"filesystem '%s' cannot be mounted using 'mount'.\n"
"Use 'zfs set mountpoint=%s' or 'zfs mount %s'.\n"
"See zfs(8) for more information.\n"),
dataset, "legacy", dataset);
return (MOUNT_USAGE);
}
if (!fake) {
error = mount(dataset, mntpoint, MNTTYPE_ZFS,
mntflags, mntopts);
}
if (error) {
switch (errno) {
case ENOENT:
(void) fprintf(stderr, gettext("mount point "
"'%s' does not exist\n"), mntpoint);
return (MOUNT_SYSERR);
case EBUSY:
(void) fprintf(stderr, gettext("filesystem "
"'%s' is already mounted\n"), dataset);
return (MOUNT_BUSY);
case ENOTSUP:
if (zfs_version > ZPL_VERSION) {
(void) fprintf(stderr,
gettext("filesystem '%s' (v%d) is not "
"supported by this implementation of "
"ZFS (max v%d).\n"), dataset,
(int)zfs_version, (int)ZPL_VERSION);
} else {
(void) fprintf(stderr,
gettext("filesystem '%s' mount "
"failed for unknown reason.\n"), dataset);
}
return (MOUNT_SYSERR);
#ifdef MS_MANDLOCK
case EPERM:
if (mntflags & MS_MANDLOCK) {
(void) fprintf(stderr, gettext("filesystem "
"'%s' has the 'nbmand=on' property set, "
"this mount\noption may be disabled in "
"your kernel. Use 'zfs set nbmand=off'\n"
"to disable this option and try to "
"mount the filesystem again.\n"), dataset);
return (MOUNT_SYSERR);
}
- /* fallthru */
#endif
+ /* FALLTHROUGH */
default:
(void) fprintf(stderr, gettext("filesystem "
"'%s' can not be mounted: %s\n"), dataset,
strerror(errno));
return (MOUNT_USAGE);
}
}
if (!nomtab && mtab_is_writeable()) {
error = mtab_update(dataset, mntpoint, MNTTYPE_ZFS, mtabopt);
if (error)
return (error);
}
return (MOUNT_SUCCESS);
}
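/*
 * Illustrative sketch (not part of this diff): the legacy/zfsutil decision
 * that main() applies before mounting, reduced to a pure function.  The
 * function name, parameters, and return codes are stand-ins chosen for this
 * example; "legacy" matches the ZFS_MOUNTPOINT_LEGACY property value.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* 0: proceed with the mount, 1/2: the two usage errors reported above. */
static int
check_mount_path(bool zfsutil, bool remount, bool fake, const char *mntpoint_prop)
{
	bool legacy = (strcmp(mntpoint_prop, "legacy") == 0);

	if (zfsutil && legacy)
		return (1);	/* legacy dataset: must use mount(8) */
	if (!zfsutil && !(remount || fake) && !legacy)
		return (2);	/* non-legacy dataset: must use 'zfs mount' */
	return (0);
}

int
main(void)
{
	printf("%d\n", check_mount_path(true, false, false, "legacy"));	/* 1 */
	printf("%d\n", check_mount_path(false, false, false, "/tank"));	/* 2 */
	printf("%d\n", check_mount_path(true, false, false, "/tank"));	/* 0 */
	return (0);
}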
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index 5017a0e7d5d9..ee85a2de8b96 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -1,8814 +1,8814 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
* Copyright (c) 2015, 2017, Intel Corporation.
* Copyright (c) 2020 Datto Inc.
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
* Copyright (c) 2021 Allan Jude
* Copyright (c) 2021 Toomas Soome <tsoome@me.com>
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_bookmark.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_send.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <sys/dsl_crypt.h>
#include <sys/dsl_scan.h>
#include <sys/btree.h>
#include <zfs_comutil.h>
#include <sys/zstd/zstd.h>
#include <libnvpair.h>
#include <libzutil.h>
#include "zdb.h"
#define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \
zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \
zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \
(idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ? \
DMU_OT_ZAP_OTHER : \
(idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ? \
DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)
static char *
zdb_ot_name(dmu_object_type_t type)
{
if (type < DMU_OT_NUMTYPES)
return (dmu_ot[type].ot_name);
else if ((type & DMU_OT_NEWTYPE) &&
((type & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS))
return (dmu_ot_byteswap[type & DMU_OT_BYTESWAP_MASK].ob_name);
else
return ("UNKNOWN");
}
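/*
 * Illustrative, self-contained sketch (not part of this diff): the bounded
 * table lookup with a fallback value that the ZDB_* macros and zdb_ot_name()
 * above rely on, using a stand-in name table.
 */
#include <stdio.h>

static const char *demo_names[] = { "none", "object directory", "plain file" };
#define	DEMO_NUMTYPES	(sizeof (demo_names) / sizeof (demo_names[0]))

static const char *
demo_type_name(unsigned type)
{
	return (type < DEMO_NUMTYPES ? demo_names[type] : "UNKNOWN");
}

int
main(void)
{
	printf("%s\n", demo_type_name(2));	/* "plain file" */
	printf("%s\n", demo_type_name(99));	/* "UNKNOWN"    */
	return (0);
}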
extern int reference_tracking_enable;
extern int zfs_recover;
extern unsigned long zfs_arc_meta_min, zfs_arc_meta_limit;
extern int zfs_vdev_async_read_max_active;
extern boolean_t spa_load_verify_dryrun;
extern int zfs_reconstruct_indirect_combinations_max;
extern int zfs_btree_verify_intensity;
static const char cmdname[] = "zdb";
uint8_t dump_opt[256];
typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
uint64_t *zopt_metaslab = NULL;
static unsigned zopt_metaslab_args = 0;
typedef struct zopt_object_range {
uint64_t zor_obj_start;
uint64_t zor_obj_end;
uint64_t zor_flags;
} zopt_object_range_t;
zopt_object_range_t *zopt_object_ranges = NULL;
static unsigned zopt_object_args = 0;
static int flagbits[256];
#define ZOR_FLAG_PLAIN_FILE 0x0001
#define ZOR_FLAG_DIRECTORY 0x0002
#define ZOR_FLAG_SPACE_MAP 0x0004
#define ZOR_FLAG_ZAP 0x0008
#define ZOR_FLAG_ALL_TYPES -1
#define ZOR_SUPPORTED_FLAGS (ZOR_FLAG_PLAIN_FILE | \
ZOR_FLAG_DIRECTORY | \
ZOR_FLAG_SPACE_MAP | \
ZOR_FLAG_ZAP)
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
uint64_t max_inflight_bytes = 256 * 1024 * 1024; /* 256MB */
static int leaked_objects = 0;
static range_tree_t *mos_refd_objs;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *,
boolean_t);
static void mos_obj_refd(uint64_t);
static void mos_obj_refd_multiple(uint64_t);
static int dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx);
typedef struct sublivelist_verify {
/* FREE's that haven't yet matched to an ALLOC, in one sub-livelist */
zfs_btree_t sv_pair;
/* ALLOC's without a matching FREE, accumulates across sub-livelists */
zfs_btree_t sv_leftover;
} sublivelist_verify_t;
static int
livelist_compare(const void *larg, const void *rarg)
{
const blkptr_t *l = larg;
const blkptr_t *r = rarg;
/* Sort them according to dva[0] */
uint64_t l_dva0_vdev, r_dva0_vdev;
l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);
if (l_dva0_vdev < r_dva0_vdev)
return (-1);
else if (l_dva0_vdev > r_dva0_vdev)
return (+1);
/* if vdevs are equal, sort by offsets. */
uint64_t l_dva0_offset;
uint64_t r_dva0_offset;
l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
if (l_dva0_offset < r_dva0_offset) {
return (-1);
} else if (l_dva0_offset > r_dva0_offset) {
return (+1);
}
/*
* Since we're storing blkptrs without cancelling FREE/ALLOC pairs,
* it's possible the offsets are equal. In that case, sort by txg
*/
if (l->blk_birth < r->blk_birth) {
return (-1);
} else if (l->blk_birth > r->blk_birth) {
return (+1);
}
return (0);
}
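/*
 * Illustrative, self-contained sketch (not part of this diff): the same
 * three-level ordering livelist_compare() implements (dva[0] vdev, then
 * offset, then birth txg), applied to a plain stand-in struct via qsort().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_blk {
	uint64_t vdev;
	uint64_t offset;
	uint64_t birth_txg;
};

static int
demo_compare(const void *larg, const void *rarg)
{
	const struct demo_blk *l = larg;
	const struct demo_blk *r = rarg;

	if (l->vdev != r->vdev)
		return (l->vdev < r->vdev ? -1 : +1);
	if (l->offset != r->offset)
		return (l->offset < r->offset ? -1 : +1);
	if (l->birth_txg != r->birth_txg)
		return (l->birth_txg < r->birth_txg ? -1 : +1);
	return (0);
}

int
main(void)
{
	struct demo_blk blks[] = {
		{ 1, 0x2000, 50 }, { 0, 0x1000, 70 }, { 0, 0x1000, 60 },
	};
	qsort(blks, sizeof (blks) / sizeof (blks[0]), sizeof (blks[0]),
	    demo_compare);
	for (int i = 0; i < 3; i++)
		printf("<%llu:%llx> txg %llu\n",
		    (unsigned long long)blks[i].vdev,
		    (unsigned long long)blks[i].offset,
		    (unsigned long long)blks[i].birth_txg);
	return (0);
}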
typedef struct sublivelist_verify_block {
dva_t svb_dva;
/*
* We need this to check if the block marked as allocated
* in the livelist was freed (and potentially reallocated)
* in the metaslab spacemaps at a later TXG.
*/
uint64_t svb_allocated_txg;
} sublivelist_verify_block_t;
static void zdb_print_blkptr(const blkptr_t *bp, int flags);
typedef struct sublivelist_verify_block_refcnt {
/* block pointer entry in livelist being verified */
blkptr_t svbr_blk;
/*
* Refcount gets incremented to 1 when we encounter the first
* FREE entry for the svbr block pointer and a node for it
* is created in our ZDB verification/tracking metadata.
*
* As we encounter more FREE entries we increment this counter
* and similarly decrement it whenever we find the respective
* ALLOC entries for this block.
*
* When the refcount gets to 0 it means that all the FREE and
* ALLOC entries of this block have paired up and we no longer
* need to track it in our verification logic (e.g. the node
* containing this struct in our verification data structure
* should be freed).
*
* [refer to sublivelist_verify_blkptr() for the actual code]
*/
uint32_t svbr_refcnt;
} sublivelist_verify_block_refcnt_t;
static int
sublivelist_block_refcnt_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_refcnt_t *l = larg;
const sublivelist_verify_block_refcnt_t *r = rarg;
return (livelist_compare(&l->svbr_blk, &r->svbr_blk));
}
static int
sublivelist_verify_blkptr(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx)
{
ASSERT3P(tx, ==, NULL);
struct sublivelist_verify *sv = arg;
sublivelist_verify_block_refcnt_t current = {
.svbr_blk = *bp,
/*
* Start with 1 in case this is the first free entry.
* This field is not used for our B-Tree comparisons
* anyway.
*/
.svbr_refcnt = 1,
};
zfs_btree_index_t where;
sublivelist_verify_block_refcnt_t *pair =
zfs_btree_find(&sv->sv_pair, &current, &where);
if (free) {
if (pair == NULL) {
/* first free entry for this block pointer */
zfs_btree_add(&sv->sv_pair, &current);
} else {
pair->svbr_refcnt++;
}
} else {
if (pair == NULL) {
/* block that is currently marked as allocated */
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
if (DVA_IS_EMPTY(&bp->blk_dva[i]))
break;
sublivelist_verify_block_t svb = {
.svb_dva = bp->blk_dva[i],
.svb_allocated_txg = bp->blk_birth
};
if (zfs_btree_find(&sv->sv_leftover, &svb,
&where) == NULL) {
zfs_btree_add_idx(&sv->sv_leftover,
&svb, &where);
}
}
} else {
/* alloc matches a free entry */
pair->svbr_refcnt--;
if (pair->svbr_refcnt == 0) {
/* all allocs and frees have been matched */
zfs_btree_remove_idx(&sv->sv_pair, &where);
}
}
}
return (0);
}
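/*
 * Illustrative, self-contained sketch (not part of this diff): the FREE/ALLOC
 * pairing idea behind sublivelist_verify_blkptr(), with a flat array standing
 * in for the sv_pair B-tree and a uint64_t id standing in for the blkptr.
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_MAX	16

static uint64_t pend_id[DEMO_MAX];	/* blocks with unmatched FREEs */
static uint32_t pend_refcnt[DEMO_MAX];
static int pend_cnt;

static void
record_entry(uint64_t id, int is_free)
{
	for (int i = 0; i < pend_cnt; i++) {
		if (pend_id[i] != id)
			continue;
		if (is_free) {
			pend_refcnt[i]++;		/* another FREE */
		} else if (--pend_refcnt[i] == 0) {
			/* every FREE and ALLOC for this block paired up */
			pend_id[i] = pend_id[--pend_cnt];
			pend_refcnt[i] = pend_refcnt[pend_cnt];
		}
		return;
	}
	if (is_free && pend_cnt < DEMO_MAX) {
		/* first FREE for this block: start tracking it */
		pend_id[pend_cnt] = id;
		pend_refcnt[pend_cnt++] = 1;
	} else if (!is_free) {
		/* ALLOC with no pending FREE: a leftover allocation */
		printf("leftover ALLOC for block %llu\n",
		    (unsigned long long)id);
	}
}

int
main(void)
{
	record_entry(7, 1);	/* FREE            */
	record_entry(7, 0);	/* ALLOC, pairs up  */
	record_entry(9, 0);	/* ALLOC, leftover  */
	return (0);
}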
static int
sublivelist_verify_func(void *args, dsl_deadlist_entry_t *dle)
{
int err;
struct sublivelist_verify *sv = args;
zfs_btree_create(&sv->sv_pair, sublivelist_block_refcnt_compare,
sizeof (sublivelist_verify_block_refcnt_t));
err = bpobj_iterate_nofree(&dle->dle_bpobj, sublivelist_verify_blkptr,
sv, NULL);
sublivelist_verify_block_refcnt_t *e;
zfs_btree_index_t *cookie = NULL;
while ((e = zfs_btree_destroy_nodes(&sv->sv_pair, &cookie)) != NULL) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
&e->svbr_blk, B_TRUE);
(void) printf("\tERROR: %d unmatched FREE(s): %s\n",
e->svbr_refcnt, blkbuf);
}
zfs_btree_destroy(&sv->sv_pair);
return (err);
}
static int
livelist_block_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_t *l = larg;
const sublivelist_verify_block_t *r = rarg;
if (DVA_GET_VDEV(&l->svb_dva) < DVA_GET_VDEV(&r->svb_dva))
return (-1);
else if (DVA_GET_VDEV(&l->svb_dva) > DVA_GET_VDEV(&r->svb_dva))
return (+1);
if (DVA_GET_OFFSET(&l->svb_dva) < DVA_GET_OFFSET(&r->svb_dva))
return (-1);
else if (DVA_GET_OFFSET(&l->svb_dva) > DVA_GET_OFFSET(&r->svb_dva))
return (+1);
if (DVA_GET_ASIZE(&l->svb_dva) < DVA_GET_ASIZE(&r->svb_dva))
return (-1);
else if (DVA_GET_ASIZE(&l->svb_dva) > DVA_GET_ASIZE(&r->svb_dva))
return (+1);
return (0);
}
/*
* Check for errors in a livelist while tracking all unfreed ALLOCs in the
* sublivelist_verify_t: sv->sv_leftover
*/
static void
livelist_verify(dsl_deadlist_t *dl, void *arg)
{
sublivelist_verify_t *sv = arg;
dsl_deadlist_iterate(dl, sublivelist_verify_func, sv);
}
/*
* Check for errors in the livelist entry and discard the intermediary
* data structures
*/
/* ARGSUSED */
static int
sublivelist_verify_lightweight(void *args, dsl_deadlist_entry_t *dle)
{
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare,
sizeof (sublivelist_verify_block_t));
int err = sublivelist_verify_func(&sv, dle);
zfs_btree_clear(&sv.sv_leftover);
zfs_btree_destroy(&sv.sv_leftover);
return (err);
}
typedef struct metaslab_verify {
/*
* Tree containing all the leftover ALLOCs from the livelists
* that are part of this metaslab.
*/
zfs_btree_t mv_livelist_allocs;
/*
* Metaslab information.
*/
uint64_t mv_vdid;
uint64_t mv_msid;
uint64_t mv_start;
uint64_t mv_end;
/*
* What's currently allocated for this metaslab.
*/
range_tree_t *mv_allocated;
} metaslab_verify_t;
typedef void ll_iter_t(dsl_deadlist_t *ll, void *arg);
typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme, uint64_t txg,
void *arg);
typedef struct unflushed_iter_cb_arg {
spa_t *uic_spa;
uint64_t uic_txg;
void *uic_arg;
zdb_log_sm_cb_t uic_cb;
} unflushed_iter_cb_arg_t;
static int
iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg)
{
unflushed_iter_cb_arg_t *uic = arg;
return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg));
}
static void
iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
unflushed_iter_cb_arg_t uic = {
.uic_spa = spa,
.uic_txg = sls->sls_txg,
.uic_arg = arg,
.uic_cb = cb
};
VERIFY0(space_map_iterate(sm, space_map_length(sm),
iterate_through_spacemap_logs_cb, &uic));
space_map_close(sm);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
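/*
 * Illustrative, self-contained sketch (not part of this diff): the
 * context-wrapping pattern used by iterate_through_spacemap_logs(), where a
 * small struct carries the real callback plus extra state through an
 * iterator that only passes a single opaque pointer.
 */
#include <stdint.h>
#include <stdio.h>

typedef int (*demo_cb_t)(uint64_t value, uint64_t txg, void *arg);

struct demo_wrap {
	uint64_t txg;		/* extra state to smuggle through */
	void *arg;
	demo_cb_t cb;
};

/* Stand-in for an iterator whose callback only gets (value, arg). */
static int
demo_iterate(const uint64_t *vals, int n, int (*fn)(uint64_t, void *), void *arg)
{
	for (int i = 0; i < n; i++) {
		int err = fn(vals[i], arg);
		if (err != 0)
			return (err);
	}
	return (0);
}

static int
demo_unwrap(uint64_t value, void *arg)
{
	struct demo_wrap *w = arg;
	return (w->cb(value, w->txg, w->arg));
}

static int
print_cb(uint64_t value, uint64_t txg, void *arg)
{
	(void) arg;
	printf("value %llu at txg %llu\n",
	    (unsigned long long)value, (unsigned long long)txg);
	return (0);
}

int
main(void)
{
	uint64_t vals[] = { 10, 20, 30 };
	struct demo_wrap w = { .txg = 42, .arg = NULL, .cb = print_cb };
	return (demo_iterate(vals, 3, demo_unwrap, &w));
}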
static void
verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
uint64_t offset, uint64_t size)
{
sublivelist_verify_block_t svb;
DVA_SET_VDEV(&svb.svb_dva, mv->mv_vdid);
DVA_SET_OFFSET(&svb.svb_dva, offset);
DVA_SET_ASIZE(&svb.svb_dva, size);
zfs_btree_index_t where;
uint64_t end_offset = offset + size;
/*
* Look for an exact match for the spacemap entry in the livelist entries.
* Then, look for other livelist entries that fall within the range
* of the spacemap entry, as it may have been condensed.
*/
sublivelist_verify_block_t *found =
zfs_btree_find(&mv->mv_livelist_allocs, &svb, &where);
if (found == NULL) {
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where);
}
for (; found != NULL && DVA_GET_VDEV(&found->svb_dva) == mv->mv_vdid &&
DVA_GET_OFFSET(&found->svb_dva) < end_offset;
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
if (found->svb_allocated_txg <= txg) {
(void) printf("ERROR: Livelist ALLOC [%llx:%llx] "
"from TXG %llx FREED at TXG %llx\n",
(u_longlong_t)DVA_GET_OFFSET(&found->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&found->svb_dva),
(u_longlong_t)found->svb_allocated_txg,
(u_longlong_t)txg);
}
}
}
static int
metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t txg = sme->sme_txg;
if (sme->sme_type == SM_ALLOC) {
if (range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE ALLOC: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_add(mv->mv_allocated,
offset, size);
}
} else {
if (!range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE FREE: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_remove(mv->mv_allocated,
offset, size);
}
}
if (sme->sme_type != SM_ALLOC) {
/*
* If something is freed in the spacemap, verify that
* it is not listed as allocated in the livelist.
*/
verify_livelist_allocs(mv, txg, offset, size);
}
return (0);
}
static int
spacemap_check_sm_log_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
if (vdev_id != mv->mv_vdid)
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
if (ms->ms_id != mv->mv_msid)
return (0);
if (txg < metaslab_unflushed_txg(ms))
return (0);
ASSERT3U(txg, ==, sme->sme_txg);
return (metaslab_spacemap_validation_cb(sme, mv));
}
static void
spacemap_check_sm_log(spa_t *spa, metaslab_verify_t *mv)
{
iterate_through_spacemap_logs(spa, spacemap_check_sm_log_cb, mv);
}
static void
spacemap_check_ms_sm(space_map_t *sm, metaslab_verify_t *mv)
{
if (sm == NULL)
return;
VERIFY0(space_map_iterate(sm, space_map_length(sm),
metaslab_spacemap_validation_cb, mv));
}
static void iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg);
/*
* Transfer blocks from sv_leftover tree to the mv_livelist_allocs if
* they are part of that metaslab (mv_msid).
*/
static void
mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
{
zfs_btree_index_t where;
sublivelist_verify_block_t *svb;
ASSERT3U(zfs_btree_numnodes(&mv->mv_livelist_allocs), ==, 0);
for (svb = zfs_btree_first(&sv->sv_leftover, &where);
svb != NULL;
svb = zfs_btree_next(&sv->sv_leftover, &where, &where)) {
if (DVA_GET_VDEV(&svb->svb_dva) != mv->mv_vdid)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start &&
(DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_start) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) >= mv->mv_end)
continue;
if ((DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_end) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
zfs_btree_add(&mv->mv_livelist_allocs, svb);
}
for (svb = zfs_btree_first(&mv->mv_livelist_allocs, &where);
svb != NULL;
svb = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
zfs_btree_remove(&sv->sv_leftover, svb);
}
}
/*
* [Livelist Check]
* Iterate through all the sublivelists and:
* - report leftover frees (**)
* - record leftover ALLOCs together with their TXG [see Cross Check]
*
* (**) Note: Double ALLOCs are valid in datasets that have dedup
* enabled. Similarly double FREEs are allowed as well but
* only if they pair up with a corresponding ALLOC entry once
* we are done with our sublivelist iteration.
*
* [Spacemap Check]
* for each metaslab:
* - iterate over spacemap and then the metaslab's entries in the
* spacemap log, then report any double FREEs and ALLOCs (do not
* blow up).
*
* [Cross Check]
* After finishing the Livelist Check phase and while being in the
* Spacemap Check phase, we find all the recorded leftover ALLOCs
* of the livelist check that are part of the metaslab that we are
* currently looking at in the Spacemap Check. We report any entries
* that are marked as ALLOCs in the livelists but have been actually
* freed (and potentially allocated again) after their TXG stamp in
* the spacemaps. Also report any ALLOCs from the livelists that
* belong to indirect vdevs (e.g. their vdev completed removal).
*
* Note that this will miss Log Spacemap entries that cancelled each other
* out before being flushed to the metaslab, so we are not guaranteed
* to match all erroneous ALLOCs.
*/
static void
livelist_metaslab_validate(spa_t *spa)
{
(void) printf("Verifying deleted livelist entries\n");
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare,
sizeof (sublivelist_verify_block_t));
iterate_deleted_livelists(spa, livelist_verify, &sv);
(void) printf("Verifying metaslab entries\n");
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
for (uint64_t mid = 0; mid < vd->vdev_ms_count; mid++) {
metaslab_t *m = vd->vdev_ms[mid];
(void) fprintf(stderr,
"\rverifying concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)mid,
(longlong_t)vd->vdev_ms_count);
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, m,
&start, &shift);
metaslab_verify_t mv;
mv.mv_allocated = range_tree_create(NULL,
type, NULL, start, shift);
mv.mv_vdid = vd->vdev_id;
mv.mv_msid = m->ms_id;
mv.mv_start = m->ms_start;
mv.mv_end = m->ms_start + m->ms_size;
zfs_btree_create(&mv.mv_livelist_allocs,
livelist_block_compare,
sizeof (sublivelist_verify_block_t));
mv_populate_livelist_allocs(&mv, &sv);
spacemap_check_ms_sm(m->ms_sm, &mv);
spacemap_check_sm_log(spa, &mv);
range_tree_vacate(mv.mv_allocated, NULL, NULL);
range_tree_destroy(mv.mv_allocated);
zfs_btree_clear(&mv.mv_livelist_allocs);
zfs_btree_destroy(&mv.mv_livelist_allocs);
}
}
(void) fprintf(stderr, "\n");
/*
* If there are any segments in the leftover tree after we walked
* through all the metaslabs in the concrete vdevs then this means
* that we have segments in the livelists that belong to indirect
* vdevs and are marked as allocated.
*/
if (zfs_btree_numnodes(&sv.sv_leftover) == 0) {
zfs_btree_destroy(&sv.sv_leftover);
return;
}
(void) printf("ERROR: Found livelist blocks marked as allocated "
"for indirect vdevs:\n");
zfs_btree_index_t *where = NULL;
sublivelist_verify_block_t *svb;
while ((svb = zfs_btree_destroy_nodes(&sv.sv_leftover, &where)) !=
NULL) {
int vdev_id = DVA_GET_VDEV(&svb->svb_dva);
ASSERT3U(vdev_id, <, rvd->vdev_children);
vdev_t *vd = rvd->vdev_child[vdev_id];
ASSERT(!vdev_is_concrete(vd));
(void) printf("<%d:%llx:%llx> TXG %llx\n",
vdev_id, (u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva),
(u_longlong_t)svb->svb_allocated_txg);
}
(void) printf("\n");
zfs_btree_destroy(&sv.sv_leftover);
}
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
usage(void)
{
(void) fprintf(stderr,
"Usage:\t%s [-AbcdDFGhikLMPsvXy] [-e [-V] [-p <path> ...]] "
"[-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]]\n"
"\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]\n"
"\t%s [-v] <bookmark>\n"
"\t%s -C [-A] [-U <cache>]\n"
"\t%s -l [-Aqu] <device>\n"
"\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
"[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
"\t%s -O <dataset> <path>\n"
"\t%s -r <dataset> <path> <destination>\n"
"\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
"\t%s -E [-A] word0:word1:...:word15\n"
"\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
"<poolname>\n\n",
cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
cmdname, cmdname, cmdname, cmdname);
(void) fprintf(stderr, " Dataset name must include at least one "
"separator character '/' or '@'\n");
(void) fprintf(stderr, " If dataset name is specified, only that "
"dataset is dumped\n");
(void) fprintf(stderr, " If object numbers or object number "
"ranges are specified, only those\n"
" objects or ranges are dumped.\n\n");
(void) fprintf(stderr,
" Object ranges take the form <start>:<end>[:<flags>]\n"
" start Starting object number\n"
" end Ending object number, or -1 for no upper bound\n"
" flags Optional flags to select object types:\n"
" A All objects (this is the default)\n"
" d ZFS directories\n"
" f ZFS files \n"
" m SPA space maps\n"
" z ZAPs\n"
" - Negate effect of next flag\n\n");
(void) fprintf(stderr, " Options to control amount of output:\n");
(void) fprintf(stderr, " -b block statistics\n");
(void) fprintf(stderr, " -c checksum all metadata (twice for "
"all data) blocks\n");
(void) fprintf(stderr, " -C config (or cachefile if alone)\n");
(void) fprintf(stderr, " -d dataset(s)\n");
(void) fprintf(stderr, " -D dedup statistics\n");
(void) fprintf(stderr, " -E decode and display block from an "
"embedded block pointer\n");
(void) fprintf(stderr, " -h pool history\n");
(void) fprintf(stderr, " -i intent logs\n");
(void) fprintf(stderr, " -l read label contents\n");
(void) fprintf(stderr, " -k examine the checkpointed state "
"of the pool\n");
(void) fprintf(stderr, " -L disable leak tracking (do not "
"load spacemaps)\n");
(void) fprintf(stderr, " -m metaslabs\n");
(void) fprintf(stderr, " -M metaslab groups\n");
(void) fprintf(stderr, " -O perform object lookups by path\n");
(void) fprintf(stderr, " -r copy an object by path to file\n");
(void) fprintf(stderr, " -R read and display block from a "
"device\n");
(void) fprintf(stderr, " -s report stats on zdb's I/O\n");
(void) fprintf(stderr, " -S simulate dedup to measure effect\n");
(void) fprintf(stderr, " -v verbose (applies to all "
"others)\n");
(void) fprintf(stderr, " -y perform livelist and metaslab "
"validation on any livelists being deleted\n\n");
(void) fprintf(stderr, " Below options are intended for use "
"with other options:\n");
(void) fprintf(stderr, " -A ignore assertions (-A), enable "
"panic recovery (-AA) or both (-AAA)\n");
(void) fprintf(stderr, " -e pool is exported/destroyed/"
"has altroot/not in a cachefile\n");
(void) fprintf(stderr, " -F attempt automatic rewind within "
"safe range of transaction groups\n");
(void) fprintf(stderr, " -G dump zfs_dbgmsg buffer before "
"exiting\n");
(void) fprintf(stderr, " -I <number of inflight I/Os> -- "
"specify the maximum number of\n "
"checksumming I/Os [default is 200]\n");
(void) fprintf(stderr, " -o <variable>=<value> set global "
"variable to an unsigned 32-bit integer\n");
(void) fprintf(stderr, " -p <path> -- use one or more with "
"-e to specify path to vdev dir\n");
(void) fprintf(stderr, " -P print numbers in parseable form\n");
(void) fprintf(stderr, " -q don't print label contents\n");
(void) fprintf(stderr, " -t <txg> -- highest txg to use when "
"searching for uberblocks\n");
(void) fprintf(stderr, " -u uberblock\n");
(void) fprintf(stderr, " -U <cachefile_path> -- use alternate "
"cachefile\n");
(void) fprintf(stderr, " -V do verbatim import\n");
(void) fprintf(stderr, " -x <dumpdir> -- "
"dump all read blocks into specified directory\n");
(void) fprintf(stderr, " -X attempt extreme rewind (does not "
"work with dataset)\n");
(void) fprintf(stderr, " -Y attempt all reconstruction "
"combinations for split blocks\n");
(void) fprintf(stderr, " -Z show ZSTD headers \n");
(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
"to make only that option verbose\n");
(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
exit(1);
}
static void
dump_debug_buffer(void)
{
if (dump_opt['G']) {
(void) printf("\n");
(void) fflush(stdout);
zfs_dbgmsg_print("zdb");
}
}
/*
* Called for usage errors that are discovered after a call to spa_open(),
* dmu_bonus_hold(), or pool_match(). abort() is called for other errors.
*/
static void
fatal(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) fprintf(stderr, "%s: ", cmdname);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
(void) fprintf(stderr, "\n");
dump_debug_buffer();
exit(1);
}
/* ARGSUSED */
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
nvlist_t *nv;
size_t nvsize = *(uint64_t *)data;
char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
umem_free(packed, nvsize);
dump_nvlist(nv, 8);
nvlist_free(nv);
}
/* ARGSUSED */
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
spa_history_phys_t *shp = data;
if (shp == NULL)
return;
(void) printf("\t\tpool_create_len = %llu\n",
(u_longlong_t)shp->sh_pool_create_len);
(void) printf("\t\tphys_max_off = %llu\n",
(u_longlong_t)shp->sh_phys_max_off);
(void) printf("\t\tbof = %llu\n",
(u_longlong_t)shp->sh_bof);
(void) printf("\t\teof = %llu\n",
(u_longlong_t)shp->sh_eof);
(void) printf("\t\trecords_lost = %llu\n",
(u_longlong_t)shp->sh_records_lost);
}
static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
if (dump_opt['P'])
(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
else
nicenum(num, buf, buflen);
}
static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;
static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
int i;
int minidx = size - 1;
int maxidx = 0;
uint64_t max = 0;
for (i = 0; i < size; i++) {
if (histo[i] > max)
max = histo[i];
if (histo[i] > 0 && i > maxidx)
maxidx = i;
if (histo[i] > 0 && i < minidx)
minidx = i;
}
if (max < histo_width)
max = histo_width;
for (i = minidx; i <= maxidx; i++) {
(void) printf("\t\t\t%3u: %6llu %s\n",
i + offset, (u_longlong_t)histo[i],
&histo_stars[(max - histo[i]) * histo_width / max]);
}
}
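/*
 * Illustrative, self-contained sketch (not part of this diff): the star
 * scaling dump_histogram() uses, where a bucket's bar is printed by indexing
 * into a fixed string of stars so its length is roughly
 * histo[i] * width / max.  The sample bucket values are made up.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	static const char stars[] = "****************************************";
	const uint64_t width = sizeof (stars) - 1;
	uint64_t histo[] = { 3, 10, 40, 25 };
	uint64_t max = 40;

	if (max < width)
		max = width;	/* same clamp as dump_histogram() */
	for (int i = 0; i < 4; i++)
		printf("%3d: %6llu %s\n", i, (unsigned long long)histo[i],
		    &stars[(max - histo[i]) * width / max]);
	return (0);
}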
static void
dump_zap_stats(objset_t *os, uint64_t object)
{
int error;
zap_stats_t zs;
error = zap_get_stats(os, object, &zs);
if (error)
return;
if (zs.zs_ptrtbl_len == 0) {
ASSERT(zs.zs_num_blocks == 1);
(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
(u_longlong_t)zs.zs_blocksize,
(u_longlong_t)zs.zs_num_entries);
return;
}
(void) printf("\tFat ZAP stats:\n");
(void) printf("\t\tPointer table:\n");
(void) printf("\t\t\t%llu elements\n",
(u_longlong_t)zs.zs_ptrtbl_len);
(void) printf("\t\t\tzt_blk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_blk);
(void) printf("\t\t\tzt_numblks: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_numblks);
(void) printf("\t\t\tzt_shift: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_shift);
(void) printf("\t\t\tzt_blks_copied: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_blks_copied);
(void) printf("\t\t\tzt_nextblk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_nextblk);
(void) printf("\t\tZAP entries: %llu\n",
(u_longlong_t)zs.zs_num_entries);
(void) printf("\t\tLeaf blocks: %llu\n",
(u_longlong_t)zs.zs_num_leafs);
(void) printf("\t\tTotal blocks: %llu\n",
(u_longlong_t)zs.zs_num_blocks);
(void) printf("\t\tzap_block_type: 0x%llx\n",
(u_longlong_t)zs.zs_block_type);
(void) printf("\t\tzap_magic: 0x%llx\n",
(u_longlong_t)zs.zs_magic);
(void) printf("\t\tzap_salt: 0x%llx\n",
(u_longlong_t)zs.zs_salt);
(void) printf("\t\tLeafs with 2^n pointers:\n");
dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks with n*5 entries:\n");
dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks n/10 full:\n");
dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tEntries with n chunks:\n");
dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBuckets with n entries:\n");
dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}
/*ARGSUSED*/
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/*ARGSUSED*/
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) printf("\tUNKNOWN OBJECT TYPE\n");
}
/*ARGSUSED*/
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/*ARGSUSED*/
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
uint64_t *arr;
uint64_t oursize;
if (dump_opt['d'] < 6)
return;
if (data == NULL) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(os, object, &doi));
size = doi.doi_max_offset;
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
arr = kmem_alloc(oursize, KM_SLEEP);
int err = dmu_read(os, object, 0, oursize, arr, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(arr, oursize);
return;
}
} else {
/*
* Even though the allocation is already done in this code path,
* we still cap the size to prevent excessive printing.
*/
oursize = MIN(size, 1 << 20);
arr = data;
}
if (size == 0) {
(void) printf("\t\t[]\n");
return;
}
(void) printf("\t\t[%0llx", (u_longlong_t)arr[0]);
for (size_t i = 1; i * sizeof (uint64_t) < oursize; i++) {
if (i % 4 != 0)
(void) printf(", %0llx", (u_longlong_t)arr[i]);
else
(void) printf(",\n\t\t%0llx", (u_longlong_t)arr[i]);
}
if (oursize != size)
(void) printf(", ... ");
(void) printf("]\n");
if (data == NULL)
kmem_free(arr, oursize);
}
/*ARGSUSED*/
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
void *prop;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
prop = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers, prop);
if (attr.za_integer_length == 1) {
if (strcmp(attr.za_name,
DSL_CRYPTO_KEY_MASTER_KEY) == 0 ||
strcmp(attr.za_name,
DSL_CRYPTO_KEY_HMAC_KEY) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_IV) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_MAC) == 0 ||
strcmp(attr.za_name, DMU_POOL_CHECKSUM_SALT) == 0) {
uint8_t *u8 = prop;
for (i = 0; i < attr.za_num_integers; i++) {
(void) printf("%02x", u8[i]);
}
} else {
(void) printf("%s", (char *)prop);
}
} else {
for (i = 0; i < attr.za_num_integers; i++) {
switch (attr.za_integer_length) {
case 2:
(void) printf("%u ",
((uint16_t *)prop)[i]);
break;
case 4:
(void) printf("%u ",
((uint32_t *)prop)[i]);
break;
case 8:
(void) printf("%lld ",
(u_longlong_t)((int64_t *)prop)[i]);
break;
}
}
}
(void) printf("\n");
umem_free(prop, attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
bpobj_phys_t *bpop = data;
uint64_t i;
char bytes[32], comp[32], uncomp[32];
/* make sure the output won't get truncated */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
if (bpop == NULL)
return;
zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf("\t\tnum_blkptrs = %llu\n",
(u_longlong_t)bpop->bpo_num_blkptrs);
(void) printf("\t\tbytes = %s\n", bytes);
if (size >= BPOBJ_SIZE_V1) {
(void) printf("\t\tcomp = %s\n", comp);
(void) printf("\t\tuncomp = %s\n", uncomp);
}
if (size >= BPOBJ_SIZE_V2) {
(void) printf("\t\tsubobjs = %llu\n",
(u_longlong_t)bpop->bpo_subobjs);
(void) printf("\t\tnum_subobjs = %llu\n",
(u_longlong_t)bpop->bpo_num_subobjs);
}
if (size >= sizeof (*bpop)) {
(void) printf("\t\tnum_freed = %llu\n",
(u_longlong_t)bpop->bpo_num_freed);
}
if (dump_opt['d'] < 5)
return;
for (i = 0; i < bpop->bpo_num_blkptrs; i++) {
char blkbuf[BP_SPRINTF_LEN];
blkptr_t bp;
int err = dmu_read(os, object,
i * sizeof (bp), sizeof (bp), &bp, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
break;
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp,
BP_GET_FREE(&bp));
(void) printf("\t%s\n", blkbuf);
}
}
/* ARGSUSED */
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
dmu_object_info_t doi;
int64_t i;
VERIFY0(dmu_object_info(os, object, &doi));
uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);
int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(subobjs, doi.doi_max_offset);
return;
}
int64_t last_nonzero = -1;
for (i = 0; i < doi.doi_max_offset / 8; i++) {
if (subobjs[i] != 0)
last_nonzero = i;
}
for (i = 0; i <= last_nonzero; i++) {
(void) printf("\t%llu\n", (u_longlong_t)subobjs[i]);
}
kmem_free(subobjs, doi.doi_max_offset);
}
/*ARGSUSED*/
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
dump_zap_stats(os, object);
/* contents are printed elsewhere, properly decoded */
}
/*ARGSUSED*/
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
(void) printf(" %llx : [%d:%d:%d]\n",
(u_longlong_t)attr.za_first_integer,
(int)ATTR_LENGTH(attr.za_first_integer),
(int)ATTR_BSWAP(attr.za_first_integer),
(int)ATTR_NUM(attr.za_first_integer));
}
zap_cursor_fini(&zc);
}
/*ARGSUSED*/
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
uint16_t *layout_attrs;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = [", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
VERIFY(attr.za_integer_length == 2);
layout_attrs = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
VERIFY(zap_lookup(os, object, attr.za_name,
attr.za_integer_length,
attr.za_num_integers, layout_attrs) == 0);
for (i = 0; i != attr.za_num_integers; i++)
(void) printf(" %d ", (int)layout_attrs[i]);
(void) printf("]\n");
umem_free(layout_attrs,
attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
/*ARGSUSED*/
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
const char *typenames[] = {
/* 0 */ "not specified",
/* 1 */ "FIFO",
/* 2 */ "Character Device",
/* 3 */ "3 (invalid)",
/* 4 */ "Directory",
/* 5 */ "5 (invalid)",
/* 6 */ "Block Device",
/* 7 */ "7 (invalid)",
/* 8 */ "Regular File",
/* 9 */ "9 (invalid)",
/* 10 */ "Symbolic Link",
/* 11 */ "11 (invalid)",
/* 12 */ "Socket",
/* 13 */ "Door",
/* 14 */ "Event Port",
/* 15 */ "15 (invalid)",
};
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = %lld (type: %s)\n",
attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
}
zap_cursor_fini(&zc);
}
static int
get_dtl_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_ops->vdev_op_leaf) {
space_map_t *sm = vd->vdev_dtl_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
return (1);
return (0);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_dtl_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_metaslab_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd) {
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
space_map_t *sm = vd->vdev_ms[m]->ms_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
refcount++;
}
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_metaslab_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_obsolete_refcount(vdev_t *vd)
{
uint64_t obsolete_sm_object;
int refcount = 0;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (vd->vdev_top == vd && obsolete_sm_object != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
obsolete_sm_object, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
refcount++;
}
} else {
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
ASSERT3U(obsolete_sm_object, ==, 0);
}
for (unsigned c = 0; c < vd->vdev_children; c++) {
refcount += get_obsolete_refcount(vd->vdev_child[c]);
}
return (refcount);
}
static int
get_prev_obsolete_spacemap_refcount(spa_t *spa)
{
uint64_t prev_obj =
spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
if (prev_obj != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
return (1);
}
}
return (0);
}
static int
get_checkpoint_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd && vd->vdev_top_zap != 0 &&
zap_contains(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0)
refcount++;
for (uint64_t c = 0; c < vd->vdev_children; c++)
refcount += get_checkpoint_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_log_spacemap_refcount(spa_t *spa)
{
return (avl_numnodes(&spa->spa_sm_logs_by_txg));
}
static int
verify_spacemap_refcounts(spa_t *spa)
{
uint64_t expected_refcount = 0;
uint64_t actual_refcount;
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
&expected_refcount);
actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);
actual_refcount += get_log_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
"actual %lld\n",
(longlong_t)expected_refcount,
(longlong_t)actual_refcount);
return (2);
}
return (0);
}
static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
const char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
"INVALID", "INVALID", "INVALID", "INVALID" };
if (sm == NULL)
return;
(void) printf("space map object %llu:\n",
(longlong_t)sm->sm_object);
(void) printf(" smp_length = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_length);
(void) printf(" smp_alloc = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_alloc);
if (dump_opt['d'] < 6 && dump_opt['m'] < 4)
return;
/*
* Print out the freelist entries in both encoded and decoded form.
*/
uint8_t mapshift = sm->sm_shift;
int64_t alloc = 0;
uint64_t word, entry_id = 0;
for (uint64_t offset = 0; offset < space_map_length(sm);
offset += sizeof (word)) {
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (word), &word, DMU_READ_PREFETCH));
if (sm_entry_is_debug(word)) {
uint64_t de_txg = SM_DEBUG_TXG_DECODE(word);
uint64_t de_sync_pass = SM_DEBUG_SYNCPASS_DECODE(word);
if (de_txg == 0) {
(void) printf(
"\t [%6llu] PADDING\n",
(u_longlong_t)entry_id);
} else {
(void) printf(
"\t [%6llu] %s: txg %llu pass %llu\n",
(u_longlong_t)entry_id,
ddata[SM_DEBUG_ACTION_DECODE(word)],
(u_longlong_t)de_txg,
(u_longlong_t)de_sync_pass);
}
entry_id++;
continue;
}
uint8_t words;
char entry_type;
uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;
if (sm_entry_is_single_word(word)) {
entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
sm->sm_start;
entry_run = SM_RUN_DECODE(word) << mapshift;
words = 1;
} else {
/* it is a two-word entry so we read another word */
ASSERT(sm_entry_is_double_word(word));
uint64_t extra_word;
offset += sizeof (extra_word);
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (extra_word), &extra_word,
DMU_READ_PREFETCH));
ASSERT3U(offset, <=, space_map_length(sm));
entry_run = SM2_RUN_DECODE(word) << mapshift;
entry_vdev = SM2_VDEV_DECODE(word);
entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM2_OFFSET_DECODE(extra_word) <<
mapshift) + sm->sm_start;
words = 2;
}
(void) printf("\t [%6llu] %c range:"
" %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
(u_longlong_t)entry_id,
entry_type, (u_longlong_t)entry_off,
(u_longlong_t)(entry_off + entry_run),
(u_longlong_t)entry_run,
(u_longlong_t)entry_vdev, words);
if (entry_type == 'A')
alloc += entry_run;
else
alloc -= entry_run;
entry_id++;
}
if (alloc != space_map_allocated(sm)) {
(void) printf("space_map_object alloc (%lld) INCONSISTENT "
"with space map summary (%lld)\n",
(longlong_t)space_map_allocated(sm), (longlong_t)alloc);
}
}
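/*
 * Illustrative, self-contained sketch (not part of this diff): the running
 * allocation tally dump_spacemap() keeps while walking entries ('A' adds the
 * run, 'F' subtracts it), checked against the summary at the end.  The entry
 * data and expected total here are made up for demonstration.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct demo_entry {
	char type;		/* 'A' = alloc, 'F' = free */
	uint64_t run;		/* length of the range */
};

int
main(void)
{
	struct demo_entry entries[] = {
		{ 'A', 0x4000 }, { 'A', 0x1000 }, { 'F', 0x1000 },
	};
	int64_t alloc = 0;
	uint64_t expected = 0x4000;	/* what the summary claims */

	for (int i = 0; i < 3; i++) {
		if (entries[i].type == 'A')
			alloc += entries[i].run;
		else
			alloc -= entries[i].run;
	}
	if (alloc != (int64_t)expected)
		printf("INCONSISTENT: walked %" PRId64 " vs summary %" PRIu64 "\n",
		    alloc, expected);
	else
		printf("consistent: %" PRId64 " bytes allocated\n", alloc);
	return (0);
}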
static void
dump_metaslab_stats(metaslab_t *msp)
{
char maxbuf[32];
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
/* make sure nicenum has enough space */
CTASSERT(sizeof (maxbuf) >= NN_NUMBUF_SZ);
zdb_nicenum(metaslab_largest_allocatable(msp), maxbuf, sizeof (maxbuf));
(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
"segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
"freepct", free_pct);
(void) printf("\tIn-memory histogram:\n");
dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
dump_metaslab(metaslab_t *msp)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
char freebuf[32];
zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
sizeof (freebuf));
(void) printf(
"\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
(u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
(u_longlong_t)space_map_object(sm), freebuf);
if (dump_opt['m'] > 2 && !dump_opt['L']) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
range_tree_stat_verify(msp->ms_allocatable);
dump_metaslab_stats(msp);
metaslab_unload(msp);
mutex_exit(&msp->ms_lock);
}
if (dump_opt['m'] > 1 && sm != NULL &&
spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
/*
* The space map histogram represents free space in chunks
* of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
*/
(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
(u_longlong_t)msp->ms_fragmentation);
dump_histogram(sm->sm_phys->smp_histogram,
SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
}
if (vd->vdev_ops == &vdev_draid_ops)
ASSERT3U(msp->ms_size, <=, 1ULL << vd->vdev_ms_shift);
else
ASSERT3U(msp->ms_size, ==, 1ULL << vd->vdev_ms_shift);
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
(void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n",
(u_longlong_t)metaslab_unflushed_txg(msp));
}
}
static void
print_vdev_metaslab_header(vdev_t *vd)
{
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *bias_str = "";
if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) {
bias_str = VDEV_ALLOC_BIAS_LOG;
} else if (alloc_bias == VDEV_BIAS_SPECIAL) {
bias_str = VDEV_ALLOC_BIAS_SPECIAL;
} else if (alloc_bias == VDEV_BIAS_DEDUP) {
bias_str = VDEV_ALLOC_BIAS_DEDUP;
}
uint64_t ms_flush_data_obj = 0;
if (vd->vdev_top_zap != 0) {
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (uint64_t), 1, &ms_flush_data_obj);
if (error != ENOENT) {
ASSERT0(error);
}
}
(void) printf("\tvdev %10llu %s",
(u_longlong_t)vd->vdev_id, bias_str);
if (ms_flush_data_obj != 0) {
(void) printf(" ms_unflushed_phys object %llu",
(u_longlong_t)ms_flush_data_obj);
}
(void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n",
"metaslabs", (u_longlong_t)vd->vdev_ms_count,
"offset", "spacemap", "free");
(void) printf("\t%15s %19s %15s %12s\n",
"---------------", "-------------------",
"---------------", "------------");
}
static void
dump_metaslab_groups(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
metaslab_class_t *mc = spa_normal_class(spa);
uint64_t fragmentation;
metaslab_class_histogram_verify(mc);
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || mg->mg_class != mc)
continue;
metaslab_group_histogram_verify(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
"fragmentation",
(u_longlong_t)tvd->vdev_id,
(u_longlong_t)tvd->vdev_ms_count);
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
(void) printf("%3s\n", "-");
} else {
(void) printf("%3llu%%\n",
(u_longlong_t)mg->mg_fragmentation);
}
dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
(void) printf("\tpool %s\tfragmentation", spa_name(spa));
fragmentation = metaslab_class_fragmentation(mc);
if (fragmentation == ZFS_FRAG_INVALID)
(void) printf("\t%3s\n", "-");
else
(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
print_vdev_indirect(vdev_t *vd)
{
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
if (vim == NULL) {
ASSERT3P(vib, ==, NULL);
return;
}
ASSERT3U(vdev_indirect_mapping_object(vim), ==,
vic->vic_mapping_object);
ASSERT3U(vdev_indirect_births_object(vib), ==,
vic->vic_births_object);
(void) printf("indirect births obj %llu:\n",
(longlong_t)vic->vic_births_object);
(void) printf(" vib_count = %llu\n",
(longlong_t)vdev_indirect_births_count(vib));
for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
vdev_indirect_birth_entry_phys_t *cur_vibe =
&vib->vib_entries[i];
(void) printf("\toffset %llx -> txg %llu\n",
(longlong_t)cur_vibe->vibe_offset,
(longlong_t)cur_vibe->vibe_phys_birth_txg);
}
(void) printf("\n");
(void) printf("indirect mapping obj %llu:\n",
(longlong_t)vic->vic_mapping_object);
(void) printf(" vim_max_offset = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_max_offset(vim));
(void) printf(" vim_bytes_mapped = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
(void) printf(" vim_count = %llu\n",
(longlong_t)vdev_indirect_mapping_num_entries(vim));
if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
return;
uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
(void) printf("\t<%llx:%llx:%llx> -> "
"<%llx:%llx:%llx> (%x obsolete)\n",
(longlong_t)vd->vdev_id,
(longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
(longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
counts[i]);
}
(void) printf("\n");
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
(void) printf("obsolete space map object %llu:\n",
(u_longlong_t)obsolete_sm_object);
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
obsolete_sm_object);
dump_spacemap(mos, vd->vdev_obsolete_sm);
(void) printf("\n");
}
}
static void
dump_metaslabs(spa_t *spa)
{
vdev_t *vd, *rvd = spa->spa_root_vdev;
uint64_t m, c = 0, children = rvd->vdev_children;
(void) printf("\nMetaslabs:\n");
if (!dump_opt['d'] && zopt_metaslab_args > 0) {
c = zopt_metaslab[0];
if (c >= children)
(void) fatal("bad vdev id: %llu", (u_longlong_t)c);
if (zopt_metaslab_args > 1) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
for (m = 1; m < zopt_metaslab_args; m++) {
if (zopt_metaslab[m] < vd->vdev_ms_count)
dump_metaslab(
vd->vdev_ms[zopt_metaslab[m]]);
else
(void) fprintf(stderr, "bad metaslab "
"number %llu\n",
(u_longlong_t)zopt_metaslab[m]);
}
(void) printf("\n");
return;
}
children = c + 1;
}
for (; c < children; c++) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
print_vdev_indirect(vd);
for (m = 0; m < vd->vdev_ms_count; m++)
dump_metaslab(vd->vdev_ms[m]);
(void) printf("\n");
}
}
static void
dump_log_spacemaps(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
(void) printf("\nLog Space Maps in Pool:\n");
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
(void) printf("Log Spacemap object %llu txg %llu\n",
(u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg);
dump_spacemap(spa->spa_meta_objset, sm);
space_map_close(sm);
}
(void) printf("\n");
}
static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
const ddt_phys_t *ddp = dde->dde_phys;
const ddt_key_t *ddk = &dde->dde_key;
const char *types[4] = { "ditto", "single", "double", "triple" };
char blkbuf[BP_SPRINTF_LEN];
blkptr_t blk;
int p;
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
(void) printf("index %llx refcnt %llu %s %s\n",
(u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
types[p], blkbuf);
}
}
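/*
* Print the dedup, compress and copies ratios derived from a
* ddt_stat_t. For example, with referenced lsize/psize/dsize of
* 200G/100G/80G and an actual dsize of 40G: dedup = 80/40 = 2.00,
* compress = 200/100 = 2.00, copies = 80/100 = 0.80, and
* dedup * compress / copies = 5.00.
*/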
static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
double rL, rP, rD, D, dedup, compress, copies;
if (dds->dds_blocks == 0)
return;
rL = (double)dds->dds_ref_lsize;
rP = (double)dds->dds_ref_psize;
rD = (double)dds->dds_ref_dsize;
D = (double)dds->dds_dsize;
dedup = rD / D;
compress = rL / rP;
copies = rD / rP;
(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
"dedup * compress / copies = %.2f\n\n",
dedup, compress, copies, dedup * compress / copies);
}
static void
dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
char name[DDT_NAMELEN];
ddt_entry_t dde;
uint64_t walk = 0;
dmu_object_info_t doi;
uint64_t count, dspace, mspace;
int error;
error = ddt_object_info(ddt, type, class, &doi);
if (error == ENOENT)
return;
ASSERT(error == 0);
error = ddt_object_count(ddt, type, class, &count);
ASSERT(error == 0);
if (count == 0)
return;
dspace = doi.doi_physical_blocks_512 << 9;
mspace = doi.doi_fill_count * doi.doi_data_block_size;
ddt_object_name(ddt, type, class, name);
(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
name,
(u_longlong_t)count,
(u_longlong_t)(dspace / count),
(u_longlong_t)(mspace / count));
if (dump_opt['D'] < 3)
return;
zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
if (dump_opt['D'] < 4)
return;
if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
return;
(void) printf("%s contents:\n\n", name);
while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
dump_dde(ddt, &dde, walk);
ASSERT3U(error, ==, ENOENT);
(void) printf("\n");
}
static void
dump_all_ddts(spa_t *spa)
{
ddt_histogram_t ddh_total;
ddt_stat_t dds_total;
bzero(&ddh_total, sizeof (ddh_total));
bzero(&dds_total, sizeof (dds_total));
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
dump_ddt(ddt, type, class);
}
}
}
ddt_get_dedup_stats(spa, &dds_total);
if (dds_total.dds_blocks == 0) {
(void) printf("All DDTs are empty\n");
return;
}
(void) printf("\n");
if (dump_opt['D'] > 1) {
(void) printf("DDT histogram (aggregated over all DDTs):\n");
ddt_get_dedup_histogram(spa, &ddh_total);
zpool_dump_ddt(&dds_total, &ddh_total);
}
dump_dedup_ratio(&dds_total);
}
static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
char *prefix = arg;
(void) printf("%s [%llu,%llu) length %llu\n",
prefix,
(u_longlong_t)start,
(u_longlong_t)(start + size),
(u_longlong_t)(size));
}
static void
dump_dtl(vdev_t *vd, int indent)
{
spa_t *spa = vd->vdev_spa;
boolean_t required;
const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
"outage" };
char prefix[256];
spa_vdev_state_enter(spa, SCL_NONE);
required = vdev_dtl_required(vd);
(void) spa_vdev_state_exit(spa, NULL, 0);
if (indent == 0)
(void) printf("\nDirty time logs:\n\n");
(void) printf("\t%*s%s [%s]\n", indent, "",
vd->vdev_path ? vd->vdev_path :
vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
required ? "DTL-required" : "DTL-expendable");
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_t *rt = vd->vdev_dtl[t];
if (range_tree_space(rt) == 0)
continue;
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
indent + 2, "", name[t]);
range_tree_walk(rt, dump_dtl_seg, prefix);
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
dump_spacemap(spa->spa_meta_objset,
vd->vdev_dtl_sm);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
dump_dtl(vd->vdev_child[c], indent + 4);
}
static void
dump_history(spa_t *spa)
{
nvlist_t **events = NULL;
char *buf;
uint64_t resid, len, off = 0;
uint_t num = 0;
int error;
char tbuf[30];
if ((buf = malloc(SPA_OLD_MAXBLOCKSIZE)) == NULL) {
(void) fprintf(stderr, "%s: unable to allocate I/O buffer\n",
__func__);
return;
}
do {
len = SPA_OLD_MAXBLOCKSIZE;
if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
(void) fprintf(stderr, "Unable to read history: "
"error %d\n", error);
free(buf);
return;
}
if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
break;
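/*
* zpool_history_unpack() may leave a partial record at the end of
* the buffer; back up by the residual so the next read starts there.
*/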
off -= resid;
} while (len != 0);
(void) printf("\nHistory:\n");
for (unsigned i = 0; i < num; i++) {
boolean_t printed = B_FALSE;
if (nvlist_exists(events[i], ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
} else {
tbuf[0] = '\0';
}
if (nvlist_exists(events[i], ZPOOL_HIST_CMD)) {
(void) printf("%s %s\n", tbuf,
fnvlist_lookup_string(events[i], ZPOOL_HIST_CMD));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_EVENT)) {
uint64_t ievent;
ievent = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_INT_EVENT);
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
goto next;
(void) printf(" %s [internal %s txg:%ju] %s\n",
tbuf,
zfs_history_event_names[ievent],
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_NAME)) {
(void) printf("%s [txg:%ju] %s", tbuf,
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_NAME));
if (nvlist_exists(events[i], ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(events[i],
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(
events[i],
ZPOOL_HIST_DSID));
}
(void) printf(" %s\n", fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_IOCTL)) {
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(events[i],
ZPOOL_HIST_IOCTL));
if (nvlist_exists(events[i], ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(events[i],
ZPOOL_HIST_ERRNO));
}
} else {
goto next;
}
printed = B_TRUE;
next:
if (dump_opt['h'] > 1) {
if (!printed)
(void) printf("unrecognized record:\n");
dump_nvlist(events[i], 2);
}
}
free(buf);
}
/*ARGSUSED*/
static void
dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
{
}
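/*
* Convert a (level, blkid) pair into the logical byte offset it covers
* within the object. For example, with 128 KiB data blocks
* (dn_datablkszsec = 256) and 128 KiB indirect blocks
* (dn_indblkshift = 17), each L1 block pointer spans 1024 data blocks,
* so level 1 blkid 2 maps to offset 256 MiB.
*/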
static uint64_t
blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb)
{
if (dnp == NULL) {
ASSERT(zb->zb_level < 0);
if (zb->zb_object == 0)
return (zb->zb_blkid);
return (zb->zb_blkid * BP_GET_LSIZE(bp));
}
ASSERT(zb->zb_level >= 0);
return ((zb->zb_blkid <<
(zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}
static void
snprintf_zstd_header(spa_t *spa, char *blkbuf, size_t buflen,
const blkptr_t *bp)
{
abd_t *pabd;
void *buf;
zio_t *zio;
zfs_zstdhdr_t zstd_hdr;
int error;
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_ZSTD)
return;
if (BP_IS_HOLE(bp))
return;
if (BP_IS_EMBEDDED(bp)) {
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
decode_embedded_bp_compressed(bp, buf);
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
free(buf);
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:EMBEDDED",
zstd_hdr.c_len, zstd_hdr.version, zstd_hdr.level);
return;
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
zio = zio_root(spa, NULL, NULL, 0);
/* Decrypt but don't decompress so we can read the compression header */
zio_nowait(zio_read(zio, spa, bp, pabd, BP_GET_PSIZE(bp), NULL, NULL,
ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW_COMPRESS,
NULL));
error = zio_wait(zio);
if (error) {
(void) fprintf(stderr, "read failed: %d\n", error);
return;
}
buf = abd_borrow_buf_copy(pabd, BP_GET_LSIZE(bp));
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:NORMAL",
zstd_hdr.c_len, zstd_hdr.version, zstd_hdr.level);
abd_return_buf_copy(pabd, buf, BP_GET_LSIZE(bp));
}
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp,
boolean_t bp_freed)
{
const dva_t *dva = bp->blk_dva;
int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
int i;
if (dump_opt['b'] >= 6) {
snprintf_blkptr(blkbuf, buflen, bp);
if (bp_freed) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
}
return;
}
if (BP_IS_EMBEDDED(bp)) {
(void) sprintf(blkbuf,
"EMBEDDED et=%u %llxL/%llxP B=%llu",
(int)BPE_GET_ETYPE(bp),
(u_longlong_t)BPE_GET_LSIZE(bp),
(u_longlong_t)BPE_GET_PSIZE(bp),
(u_longlong_t)bp->blk_birth);
return;
}
blkbuf[0] = '\0';
for (i = 0; i < ndvas; i++)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), "%llu:%llx:%llx ",
(u_longlong_t)DVA_GET_VDEV(&dva[i]),
(u_longlong_t)DVA_GET_OFFSET(&dva[i]),
(u_longlong_t)DVA_GET_ASIZE(&dva[i]));
if (BP_IS_HOLE(bp)) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL B=%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)bp->blk_birth);
} else {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL/%llxP F=%llu B=%llu/%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)BP_GET_PSIZE(bp),
(u_longlong_t)BP_GET_FILL(bp),
(u_longlong_t)bp->blk_birth,
(u_longlong_t)BP_PHYSICAL_BIRTH(bp));
if (bp_freed)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " cksum=%llx:%llx:%llx:%llx",
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
}
}
static void
print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
const dnode_phys_t *dnp)
{
char blkbuf[BP_SPRINTF_LEN];
int l;
if (!BP_IS_EMBEDDED(bp)) {
ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
}
(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
ASSERT(zb->zb_level >= 0);
for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
if (l == zb->zb_level) {
(void) printf("L%llx", (u_longlong_t)zb->zb_level);
} else {
(void) printf(" ");
}
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, B_FALSE);
if (dump_opt['Z'] && BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD)
snprintf_zstd_header(spa, blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
blkptr_t *bp, const zbookmark_phys_t *zb)
{
int err = 0;
if (bp->blk_birth == 0)
return (0);
print_indirect(spa, bp, zb, dnp);
if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
uint64_t fill = 0;
ASSERT(!BP_IS_REDACTED(bp));
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
if (err)
return (err);
ASSERT(buf->b_data);
/* recursively visit blocks below this */
cbp = buf->b_data;
for (i = 0; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
err = visit_indirect(spa, dnp, cbp, &czb);
if (err)
break;
fill += BP_GET_FILL(cbp);
}
if (!err)
ASSERT3U(fill, ==, BP_GET_FILL(bp));
arc_buf_destroy(buf, &buf);
}
return (err);
}
/*ARGSUSED*/
static void
dump_indirect(dnode_t *dn)
{
dnode_phys_t *dnp = dn->dn_phys;
int j;
zbookmark_phys_t czb;
(void) printf("Indirect blocks:\n");
SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
dn->dn_object, dnp->dn_nlevels - 1, 0);
for (j = 0; j < dnp->dn_nblkptr; j++) {
czb.zb_blkid = j;
(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
&dnp->dn_blkptr[j], &czb);
}
(void) printf("\n");
}
/*ARGSUSED*/
static void
dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
{
dsl_dir_phys_t *dd = data;
time_t crtime;
char nice[32];
/* make sure nicenum has enough space */
CTASSERT(sizeof (nice) >= NN_NUMBUF_SZ);
if (dd == NULL)
return;
ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
crtime = dd->dd_creation_time;
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\thead_dataset_obj = %llu\n",
(u_longlong_t)dd->dd_head_dataset_obj);
(void) printf("\t\tparent_dir_obj = %llu\n",
(u_longlong_t)dd->dd_parent_obj);
(void) printf("\t\torigin_obj = %llu\n",
(u_longlong_t)dd->dd_origin_obj);
(void) printf("\t\tchild_dir_zapobj = %llu\n",
(u_longlong_t)dd->dd_child_dir_zapobj);
zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
(void) printf("\t\tused_bytes = %s\n", nice);
zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
(void) printf("\t\tcompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
(void) printf("\t\tuncompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
(void) printf("\t\tquota = %s\n", nice);
zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
(void) printf("\t\treserved = %s\n", nice);
(void) printf("\t\tprops_zapobj = %llu\n",
(u_longlong_t)dd->dd_props_zapobj);
(void) printf("\t\tdeleg_zapobj = %llu\n",
(u_longlong_t)dd->dd_deleg_zapobj);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)dd->dd_flags);
#define DO(which) \
zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
sizeof (nice)); \
(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
DO(HEAD);
DO(SNAP);
DO(CHILD);
DO(CHILD_RSRV);
DO(REFRSRV);
#undef DO
(void) printf("\t\tclones = %llu\n",
(u_longlong_t)dd->dd_clones);
}
/*ARGSUSED*/
static void
dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
{
dsl_dataset_phys_t *ds = data;
time_t crtime;
char used[32], compressed[32], uncompressed[32], unique[32];
char blkbuf[BP_SPRINTF_LEN];
/* make sure nicenum has enough space */
CTASSERT(sizeof (used) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (compressed) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncompressed) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (unique) >= NN_NUMBUF_SZ);
if (ds == NULL)
return;
ASSERT(size == sizeof (*ds));
crtime = ds->ds_creation_time;
zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
sizeof (uncompressed));
zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
(void) printf("\t\tdir_obj = %llu\n",
(u_longlong_t)ds->ds_dir_obj);
(void) printf("\t\tprev_snap_obj = %llu\n",
(u_longlong_t)ds->ds_prev_snap_obj);
(void) printf("\t\tprev_snap_txg = %llu\n",
(u_longlong_t)ds->ds_prev_snap_txg);
(void) printf("\t\tnext_snap_obj = %llu\n",
(u_longlong_t)ds->ds_next_snap_obj);
(void) printf("\t\tsnapnames_zapobj = %llu\n",
(u_longlong_t)ds->ds_snapnames_zapobj);
(void) printf("\t\tnum_children = %llu\n",
(u_longlong_t)ds->ds_num_children);
(void) printf("\t\tuserrefs_obj = %llu\n",
(u_longlong_t)ds->ds_userrefs_obj);
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\tcreation_txg = %llu\n",
(u_longlong_t)ds->ds_creation_txg);
(void) printf("\t\tdeadlist_obj = %llu\n",
(u_longlong_t)ds->ds_deadlist_obj);
(void) printf("\t\tused_bytes = %s\n", used);
(void) printf("\t\tcompressed_bytes = %s\n", compressed);
(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
(void) printf("\t\tunique = %s\n", unique);
(void) printf("\t\tfsid_guid = %llu\n",
(u_longlong_t)ds->ds_fsid_guid);
(void) printf("\t\tguid = %llu\n",
(u_longlong_t)ds->ds_guid);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)ds->ds_flags);
(void) printf("\t\tnext_clones_obj = %llu\n",
(u_longlong_t)ds->ds_next_clones_obj);
(void) printf("\t\tprops_obj = %llu\n",
(u_longlong_t)ds->ds_props_obj);
(void) printf("\t\tbp = %s\n", blkbuf);
}
/* ARGSUSED */
static int
dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
char blkbuf[BP_SPRINTF_LEN];
if (bp->blk_birth != 0) {
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("\t%s\n", blkbuf);
}
return (0);
}
static void
dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
char bytes[32];
bptree_phys_t *bt;
dmu_buf_t *db;
/* make sure nicenum has enough space */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
if (dump_opt['d'] < 3)
return;
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
(void) printf("\n %s: %llu datasets, %s\n",
name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
dmu_buf_rele(db, FTAG);
if (dump_opt['d'] < 5)
return;
(void) printf("\n");
(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
}
/* ARGSUSED */
static int
dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
char blkbuf[BP_SPRINTF_LEN];
ASSERT(bp->blk_birth != 0);
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, bp_freed);
(void) printf("\t%s\n", blkbuf);
return (0);
}
static void
dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
char bytes[32];
char comp[32];
char uncomp[32];
uint64_t i;
/* make sure nicenum has enough space */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
if (dump_opt['d'] < 3)
return;
zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu freed, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
} else {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
}
for (i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
dump_full_bpobj(&subbpo, "subobj", indent + 1);
bpobj_close(&subbpo);
}
} else {
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%llu freed, %s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
bytes);
} else {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
bytes);
}
}
if (dump_opt['d'] < 5)
return;
if (indent == 0) {
(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
(void) printf("\n");
}
}
static int
dump_bookmark(dsl_pool_t *dp, char *name, boolean_t print_redact,
boolean_t print_list)
{
int err = 0;
zfs_bookmark_phys_t prop;
objset_t *mos = dp->dp_spa->spa_meta_objset;
err = dsl_bookmark_lookup(dp, name, NULL, &prop);
if (err != 0) {
return (err);
}
(void) printf("\t#%s: ", strchr(name, '#') + 1);
(void) printf("{guid: %llx creation_txg: %llu creation_time: "
"%llu redaction_obj: %llu}\n", (u_longlong_t)prop.zbm_guid,
(u_longlong_t)prop.zbm_creation_txg,
(u_longlong_t)prop.zbm_creation_time,
(u_longlong_t)prop.zbm_redaction_obj);
IMPLY(print_list, print_redact);
if (!print_redact || prop.zbm_redaction_obj == 0)
return (0);
redaction_list_t *rl;
VERIFY0(dsl_redaction_list_hold_obj(dp,
prop.zbm_redaction_obj, FTAG, &rl));
redaction_list_phys_t *rlp = rl->rl_phys;
(void) printf("\tRedacted:\n\t\tProgress: ");
if (rlp->rlp_last_object != UINT64_MAX ||
rlp->rlp_last_blkid != UINT64_MAX) {
(void) printf("%llu %llu (incomplete)\n",
(u_longlong_t)rlp->rlp_last_object,
(u_longlong_t)rlp->rlp_last_blkid);
} else {
(void) printf("complete\n");
}
(void) printf("\t\tSnapshots: [");
for (unsigned int i = 0; i < rlp->rlp_num_snaps; i++) {
if (i > 0)
(void) printf(", ");
(void) printf("%0llu",
(u_longlong_t)rlp->rlp_snaps[i]);
}
(void) printf("]\n\t\tLength: %llu\n",
(u_longlong_t)rlp->rlp_num_entries);
if (!print_list) {
dsl_redaction_list_rele(rl, FTAG);
return (0);
}
if (rlp->rlp_num_entries == 0) {
dsl_redaction_list_rele(rl, FTAG);
(void) printf("\t\tRedaction List: []\n\n");
return (0);
}
redact_block_phys_t *rbp_buf;
uint64_t size;
dmu_object_info_t doi;
VERIFY0(dmu_object_info(mos, prop.zbm_redaction_obj, &doi));
size = doi.doi_max_offset;
rbp_buf = kmem_alloc(size, KM_SLEEP);
err = dmu_read(mos, prop.zbm_redaction_obj, 0, size,
rbp_buf, 0);
if (err != 0) {
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
return (err);
}
(void) printf("\t\tRedaction List: [{object: %llx, offset: "
"%llx, blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[0].rbp_object,
(u_longlong_t)rbp_buf[0].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[0])),
(u_longlong_t)redact_block_get_count(&rbp_buf[0]));
for (size_t i = 1; i < rlp->rlp_num_entries; i++) {
(void) printf(",\n\t\t{object: %llx, offset: %llx, "
"blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[i].rbp_object,
(u_longlong_t)rbp_buf[i].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[i])),
(u_longlong_t)redact_block_get_count(&rbp_buf[i]));
}
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
(void) printf("]\n\n");
return (0);
}
static void
dump_bookmarks(objset_t *os, int verbosity)
{
zap_cursor_t zc;
zap_attribute_t attr;
dsl_dataset_t *ds = dmu_objset_ds(os);
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
objset_t *mos = os->os_spa->spa_meta_objset;
if (verbosity < 4)
return;
dsl_pool_config_enter(dp, FTAG);
for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
char osname[ZFS_MAX_DATASET_NAME_LEN];
char buf[ZFS_MAX_DATASET_NAME_LEN];
dmu_objset_name(os, osname);
VERIFY3S(0, <=, snprintf(buf, sizeof (buf), "%s#%s", osname,
attr.za_name));
(void) dump_bookmark(dp, buf, verbosity >= 5, verbosity >= 6);
}
zap_cursor_fini(&zc);
dsl_pool_config_exit(dp, FTAG);
}
static void
bpobj_count_refd(bpobj_t *bpo)
{
mos_obj_refd(bpo->bpo_object);
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
mos_obj_refd(bpo->bpo_phys->bpo_subobjs);
for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
bpobj_count_refd(&subbpo);
bpobj_close(&subbpo);
}
}
}
static int
dsl_deadlist_entry_count_refd(void *arg, dsl_deadlist_entry_t *dle)
{
spa_t *spa = arg;
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dle->dle_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dle->dle_bpobj);
return (0);
}
static int
dsl_deadlist_entry_dump(void *arg, dsl_deadlist_entry_t *dle)
{
ASSERT(arg == NULL);
if (dump_opt['d'] >= 5) {
char buf[128];
(void) snprintf(buf, sizeof (buf),
"mintxg %llu -> obj %llu",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
dump_full_bpobj(&dle->dle_bpobj, buf, 0);
} else {
(void) printf("mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
}
return (0);
}
static void
dump_blkptr_list(dsl_deadlist_t *dl, char *name)
{
char bytes[32];
char comp[32];
char uncomp[32];
char entries[32];
spa_t *spa = dmu_objset_spa(dl->dl_os);
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dl->dl_oldfmt) {
if (dl->dl_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dl->dl_bpobj);
} else {
mos_obj_refd(dl->dl_object);
dsl_deadlist_iterate(dl, dsl_deadlist_entry_count_refd, spa);
}
/* make sure nicenum has enough space */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (entries) >= NN_NUMBUF_SZ);
if (dump_opt['d'] < 3)
return;
if (dl->dl_oldfmt) {
dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
return;
}
zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
zdb_nicenum(avl_numnodes(&dl->dl_tree), entries, sizeof (entries));
(void) printf("\n %s: %s (%s/%s comp), %s entries\n",
name, bytes, comp, uncomp, entries);
if (dump_opt['d'] < 4)
return;
(void) printf("\n");
dsl_deadlist_iterate(dl, dsl_deadlist_entry_dump, NULL);
}
static int
verify_dd_livelist(objset_t *os)
{
uint64_t ll_used, used, ll_comp, comp, ll_uncomp, uncomp;
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
ASSERT(!dmu_objset_is_snapshot(os));
if (!dsl_deadlist_is_open(&dd->dd_livelist))
return (0);
/* Iterate through the livelist to check for duplicates */
dsl_deadlist_iterate(&dd->dd_livelist, sublivelist_verify_lightweight,
NULL);
dsl_pool_config_enter(dp, FTAG);
dsl_deadlist_space(&dd->dd_livelist, &ll_used,
&ll_comp, &ll_uncomp);
dsl_dataset_t *origin_ds;
ASSERT(dsl_pool_config_held(dp));
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin_ds));
VERIFY0(dsl_dataset_space_written(origin_ds, os->os_dsl_dataset,
&used, &comp, &uncomp));
dsl_dataset_rele(origin_ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
/*
* It's possible that the dataset's uncomp space is larger than the
* livelist's because livelists do not track embedded block pointers.
*/
if (used != ll_used || comp != ll_comp || uncomp < ll_uncomp) {
char nice_used[32], nice_comp[32], nice_uncomp[32];
(void) printf("Discrepancy in space accounting:\n");
zdb_nicenum(used, nice_used, sizeof (nice_used));
zdb_nicenum(comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("dir: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
zdb_nicenum(ll_used, nice_used, sizeof (nice_used));
zdb_nicenum(ll_comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(ll_uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("livelist: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
return (1);
}
return (0);
}
static avl_tree_t idx_tree;
static avl_tree_t domain_tree;
static boolean_t fuid_table_loaded;
static objset_t *sa_os = NULL;
static sa_attr_type_t *sa_attr_table = NULL;
static int
open_objset(const char *path, void *tag, objset_t **osp)
{
int err;
uint64_t sa_attrs = 0;
uint64_t version = 0;
VERIFY3P(sa_os, ==, NULL);
/*
* We can't own an objset if it's redacted. Therefore, we do this
* dance: hold the objset, then acquire a long hold on its dataset, then
* release the pool (which is held as part of holding the objset).
*/
err = dmu_objset_hold(path, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset '%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
if (dmu_objset_type(*osp) == DMU_OST_ZFS && !(*osp)->os_encrypted) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &version);
if (version >= ZPL_VERSION_SA) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
8, 1, &sa_attrs);
}
err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
&sa_attr_table);
if (err != 0) {
(void) fprintf(stderr, "sa_setup failed: %s\n",
strerror(err));
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele(dmu_objset_ds(*osp), tag);
*osp = NULL;
}
}
sa_os = *osp;
return (0);
}
static void
close_objset(objset_t *os, void *tag)
{
VERIFY3P(os, ==, sa_os);
if (os->os_sa != NULL)
sa_tear_down(os);
dsl_dataset_long_rele(dmu_objset_ds(os), tag);
dsl_dataset_rele(dmu_objset_ds(os), tag);
sa_attr_table = NULL;
sa_os = NULL;
}
static void
fuid_table_destroy(void)
{
if (fuid_table_loaded) {
zfs_fuid_table_destroy(&idx_tree, &domain_tree);
fuid_table_loaded = B_FALSE;
}
}
/*
* Print uid or gid information.
* For a normal POSIX id, just the id is printed in decimal.
* For CIFS files with a FUID, the fuid is printed in hex followed by
* the domain-rid string.
*/
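/*
* For example, a POSIX id prints as "uid 1000", while a FUID prints
* as something like "uid 100000200 [example.com-512]".
*/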
static void
print_idstr(uint64_t id, const char *id_type)
{
if (FUID_INDEX(id)) {
char *domain;
domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
(void) printf("\t%s %llx [%s-%d]\n", id_type,
(u_longlong_t)id, domain, (int)FUID_RID(id));
} else {
(void) printf("\t%s %llu\n", id_type, (u_longlong_t)id);
}
}
static void
dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
{
uint32_t uid_idx, gid_idx;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
/* Load domain table, if not already loaded */
if (!fuid_table_loaded && (uid_idx || gid_idx)) {
uint64_t fuid_obj;
/* first find the fuid object. It lives in the master node */
VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
8, 1, &fuid_obj) == 0);
zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
(void) zfs_fuid_table_load(os, fuid_obj,
&idx_tree, &domain_tree);
fuid_table_loaded = B_TRUE;
}
print_idstr(uid, "uid");
print_idstr(gid, "gid");
}
static void
dump_znode_sa_xattr(sa_handle_t *hdl)
{
nvlist_t *sa_xattr;
nvpair_t *elem = NULL;
int sa_xattr_size = 0;
int sa_xattr_entries = 0;
int error;
char *sa_xattr_packed;
error = sa_size(hdl, sa_attr_table[ZPL_DXATTR], &sa_xattr_size);
if (error || sa_xattr_size == 0)
return;
sa_xattr_packed = malloc(sa_xattr_size);
if (sa_xattr_packed == NULL)
return;
error = sa_lookup(hdl, sa_attr_table[ZPL_DXATTR],
sa_xattr_packed, sa_xattr_size);
if (error) {
free(sa_xattr_packed);
return;
}
error = nvlist_unpack(sa_xattr_packed, sa_xattr_size, &sa_xattr, 0);
if (error) {
free(sa_xattr_packed);
return;
}
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL)
sa_xattr_entries++;
(void) printf("\tSA xattrs: %d bytes, %d entries\n\n",
sa_xattr_size, sa_xattr_entries);
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL) {
uchar_t *value;
uint_t cnt, idx;
(void) printf("\t\t%s = ", nvpair_name(elem));
nvpair_value_byte_array(elem, &value, &cnt);
for (idx = 0; idx < cnt; ++idx) {
if (isprint(value[idx]))
(void) putchar(value[idx]);
else
(void) printf("\\%3.3o", value[idx]);
}
(void) putchar('\n');
}
nvlist_free(sa_xattr);
free(sa_xattr_packed);
}
static void
dump_znode_symlink(sa_handle_t *hdl)
{
int sa_symlink_size = 0;
char linktarget[MAXPATHLEN];
linktarget[0] = '\0';
int error;
error = sa_size(hdl, sa_attr_table[ZPL_SYMLINK], &sa_symlink_size);
if (error || sa_symlink_size == 0) {
return;
}
if (sa_lookup(hdl, sa_attr_table[ZPL_SYMLINK],
&linktarget, sa_symlink_size) == 0)
(void) printf("\ttarget %s\n", linktarget);
}
/*ARGSUSED*/
static void
dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
{
char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */
sa_handle_t *hdl;
uint64_t xattr, rdev, gen;
uint64_t uid, gid, mode, fsize, parent, links;
uint64_t pflags;
uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
time_t z_crtime, z_atime, z_mtime, z_ctime;
sa_bulk_attr_t bulk[12];
int idx = 0;
int error;
VERIFY3P(os, ==, sa_os);
if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
(void) printf("Failed to get handle for SA znode\n");
return;
}
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
&links, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
&mode, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
&fsize, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
acctm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
modtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
crtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
chgtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
&pflags, 8);
if (sa_bulk_lookup(hdl, bulk, idx)) {
(void) sa_handle_destroy(hdl);
return;
}
z_crtime = (time_t)crtm[0];
z_atime = (time_t)acctm[0];
z_mtime = (time_t)modtm[0];
z_ctime = (time_t)chgtm[0];
if (dump_opt['d'] > 4) {
error = zfs_obj_to_path(os, object, path, sizeof (path));
if (error == ESTALE) {
(void) snprintf(path, sizeof (path), "on delete queue");
} else if (error != 0) {
leaked_objects++;
(void) snprintf(path, sizeof (path),
"path not found, possibly leaked");
}
(void) printf("\tpath %s\n", path);
}
if (S_ISLNK(mode))
dump_znode_symlink(hdl);
dump_uidgid(os, uid, gid);
(void) printf("\tatime %s", ctime(&z_atime));
(void) printf("\tmtime %s", ctime(&z_mtime));
(void) printf("\tctime %s", ctime(&z_ctime));
(void) printf("\tcrtime %s", ctime(&z_crtime));
(void) printf("\tgen %llu\n", (u_longlong_t)gen);
(void) printf("\tmode %llo\n", (u_longlong_t)mode);
(void) printf("\tsize %llu\n", (u_longlong_t)fsize);
(void) printf("\tparent %llu\n", (u_longlong_t)parent);
(void) printf("\tlinks %llu\n", (u_longlong_t)links);
(void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
uint64_t projid;
if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
sizeof (uint64_t)) == 0)
(void) printf("\tprojid %llu\n", (u_longlong_t)projid);
}
if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
sizeof (uint64_t)) == 0)
(void) printf("\txattr %llu\n", (u_longlong_t)xattr);
if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
sizeof (uint64_t)) == 0)
(void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev);
dump_znode_sa_xattr(hdl);
sa_handle_destroy(hdl);
}
/*ARGSUSED*/
static void
dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/*ARGSUSED*/
static void
dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
{
}
static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
dump_none, /* unallocated */
dump_zap, /* object directory */
dump_uint64, /* object array */
dump_none, /* packed nvlist */
dump_packed_nvlist, /* packed nvlist size */
dump_none, /* bpobj */
dump_bpobj, /* bpobj header */
dump_none, /* SPA space map header */
dump_none, /* SPA space map */
dump_none, /* ZIL intent log */
dump_dnode, /* DMU dnode */
dump_dmu_objset, /* DMU objset */
dump_dsl_dir, /* DSL directory */
dump_zap, /* DSL directory child map */
dump_zap, /* DSL dataset snap map */
dump_zap, /* DSL props */
dump_dsl_dataset, /* DSL dataset */
dump_znode, /* ZFS znode */
dump_acl, /* ZFS V0 ACL */
dump_uint8, /* ZFS plain file */
dump_zpldir, /* ZFS directory */
dump_zap, /* ZFS master node */
dump_zap, /* ZFS delete queue */
dump_uint8, /* zvol object */
dump_zap, /* zvol prop */
dump_uint8, /* other uint8[] */
dump_uint64, /* other uint64[] */
dump_zap, /* other ZAP */
dump_zap, /* persistent error log */
dump_uint8, /* SPA history */
dump_history_offsets, /* SPA history offsets */
dump_zap, /* Pool properties */
dump_zap, /* DSL permissions */
dump_acl, /* ZFS ACL */
dump_uint8, /* ZFS SYSACL */
dump_none, /* FUID nvlist */
dump_packed_nvlist, /* FUID nvlist size */
dump_zap, /* DSL dataset next clones */
dump_zap, /* DSL scrub queue */
dump_zap, /* ZFS user/group/project used */
dump_zap, /* ZFS user/group/project quota */
dump_zap, /* snapshot refcount tags */
dump_ddt_zap, /* DDT ZAP object */
dump_zap, /* DDT statistics */
dump_znode, /* SA object */
dump_zap, /* SA Master Node */
dump_sa_attrs, /* SA attribute registration */
dump_sa_layouts, /* SA attribute layouts */
dump_zap, /* DSL scrub translations */
dump_none, /* fake dedup BP */
dump_zap, /* deadlist */
dump_none, /* deadlist hdr */
dump_zap, /* dsl clones */
dump_bpobj_subobjs, /* bpobj subobjs */
dump_unknown, /* Unknown type, must be last */
};
static boolean_t
match_object_type(dmu_object_type_t obj_type, uint64_t flags)
{
boolean_t match = B_TRUE;
switch (obj_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (!(flags & ZOR_FLAG_DIRECTORY))
match = B_FALSE;
break;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (!(flags & ZOR_FLAG_PLAIN_FILE))
match = B_FALSE;
break;
case DMU_OT_SPACE_MAP:
if (!(flags & ZOR_FLAG_SPACE_MAP))
match = B_FALSE;
break;
default:
if (strcmp(zdb_ot_name(obj_type), "zap") == 0) {
if (!(flags & ZOR_FLAG_ZAP))
match = B_FALSE;
break;
}
/*
* If all bits except some of the supported flags are
* set, the user combined the all-types flag (A) with
* a negated flag to exclude some types (e.g. A-f to
* show all object types except plain files).
*/
if ((flags | ZOR_SUPPORTED_FLAGS) != ZOR_FLAG_ALL_TYPES)
match = B_FALSE;
break;
}
return (match);
}
static void
dump_object(objset_t *os, uint64_t object, int verbosity,
boolean_t *print_header, uint64_t *dnode_slots_used, uint64_t flags)
{
dmu_buf_t *db = NULL;
dmu_object_info_t doi;
dnode_t *dn;
boolean_t dnode_held = B_FALSE;
void *bonus = NULL;
size_t bsize = 0;
char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32];
char bonus_size[32];
char aux[50];
int error;
/* make sure nicenum has enough space */
CTASSERT(sizeof (iblk) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (dblk) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (bonus_size) >= NN_NUMBUF_SZ);
if (*print_header) {
(void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
"Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
"lsize", "%full", "type");
*print_header = 0;
}
if (object == 0) {
dn = DMU_META_DNODE(os);
dmu_object_info_from_dnode(dn, &doi);
} else {
/*
* Encrypted datasets will have sensitive bonus buffers
* encrypted. Therefore we cannot hold the bonus buffer and
* must hold the dnode itself instead.
*/
error = dmu_object_info(os, object, &doi);
if (error)
fatal("dmu_object_info() failed, errno %u", error);
if (os->os_encrypted &&
DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) {
error = dnode_hold(os, object, FTAG, &dn);
if (error)
fatal("dnode_hold() failed, errno %u", error);
dnode_held = B_TRUE;
} else {
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error)
fatal("dmu_bonus_hold(%llu) failed, errno %u",
object, error);
bonus = db->db_data;
bsize = db->db_size;
dn = DB_DNODE((dmu_buf_impl_t *)db);
}
}
/*
* Default to showing all object types if no flags were specified.
*/
if (flags != 0 && flags != ZOR_FLAG_ALL_TYPES &&
!match_object_type(doi.doi_type, flags))
goto out;
if (dnode_slots_used)
*dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
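/*
* %full is fill_count * data_block_size relative to max_offset; for
* the meta-dnode (object 0) fill_count is in dnodes, so it is scaled
* down by DNODES_PER_BLOCK.
*/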
(void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count *
doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) /
doi.doi_max_offset);
aux[0] = '\0';
if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
}
if (doi.doi_compress == ZIO_COMPRESS_INHERIT &&
ZIO_COMPRESS_HASLEVEL(os->os_compress) && verbosity >= 6) {
const char *compname = NULL;
if (zfs_prop_index_to_string(ZFS_PROP_COMPRESSION,
ZIO_COMPRESS_RAW(os->os_compress, os->os_complevel),
&compname) == 0) {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux), " (Z=inherit=%s)",
compname);
} else {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux),
" (Z=inherit=%s-unknown)",
ZDB_COMPRESS_NAME(os->os_compress));
}
} else if (doi.doi_compress == ZIO_COMPRESS_INHERIT && verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=inherit=%s)", ZDB_COMPRESS_NAME(os->os_compress));
} else if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
}
(void) printf("%10lld %3u %5s %5s %5s %6s %5s %6s %s%s\n",
(u_longlong_t)object, doi.doi_indirection, iblk, dblk,
asize, dnsize, lsize, fill, zdb_ot_name(doi.doi_type), aux);
if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
(void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n",
"", "", "", "", "", "", bonus_size, "bonus",
zdb_ot_name(doi.doi_bonus_type));
}
if (verbosity >= 4) {
(void) printf("\tdnode flags: %s%s%s%s\n",
(dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
"USED_BYTES " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
"USERUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ?
"USEROBJUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
"SPILL_BLKPTR" : "");
(void) printf("\tdnode maxblkid: %llu\n",
(longlong_t)dn->dn_phys->dn_maxblkid);
if (!dnode_held) {
object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os,
object, bonus, bsize);
} else {
(void) printf("\t\t(bonus encrypted)\n");
}
if (!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type)) {
object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object,
NULL, 0);
} else {
(void) printf("\t\t(object encrypted)\n");
}
*print_header = B_TRUE;
}
if (verbosity >= 5)
dump_indirect(dn);
if (verbosity >= 5) {
/*
* Report the list of segments that comprise the object.
*/
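/*
* Walk alternating data/hole boundaries with dnode_next_offset():
* each pass finds the start of the next data segment, then the hole
* that follows it, and prints the [start, end) range.
*/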
uint64_t start = 0;
uint64_t end;
uint64_t blkfill = 1;
int minlvl = 1;
if (dn->dn_type == DMU_OT_DNODE) {
minlvl = 0;
blkfill = DNODES_PER_BLOCK;
}
for (;;) {
char segsize[32];
/* make sure nicenum has enough space */
CTASSERT(sizeof (segsize) >= NN_NUMBUF_SZ);
error = dnode_next_offset(dn,
0, &start, minlvl, blkfill, 0);
if (error)
break;
end = start;
error = dnode_next_offset(dn,
DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
zdb_nicenum(end - start, segsize, sizeof (segsize));
(void) printf("\t\tsegment [%016llx, %016llx)"
" size %5s\n", (u_longlong_t)start,
(u_longlong_t)end, segsize);
if (error)
break;
start = end;
}
}
out:
if (db != NULL)
dmu_buf_rele(db, FTAG);
if (dnode_held)
dnode_rele(dn, FTAG);
}
static void
count_dir_mos_objects(dsl_dir_t *dd)
{
mos_obj_refd(dd->dd_object);
mos_obj_refd(dsl_dir_phys(dd)->dd_child_dir_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_deleg_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_props_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_clones);
/*
* The dd_crypto_obj can be referenced by multiple dsl_dir's.
* Ignore the references after the first one.
*/
mos_obj_refd_multiple(dd->dd_crypto_obj);
}
static void
count_ds_mos_objects(dsl_dataset_t *ds)
{
mos_obj_refd(ds->ds_object);
mos_obj_refd(dsl_dataset_phys(ds)->ds_next_clones_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_props_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_userrefs_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
mos_obj_refd(ds->ds_bookmarks_obj);
if (!dsl_dataset_is_snapshot(ds)) {
count_dir_mos_objects(ds->ds_dir);
}
}
static const char *objset_types[DMU_OST_NUMTYPES] = {
"NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
/*
* Parse a string denoting a range of object IDs of the form
* <start>[:<end>[:flags]], and store the results in zor.
* Return 0 on success. On error, return 1 and update the msg
* pointer to point to a descriptive error message.
*/
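/*
* For example, "128" selects the single object 128, "100:200" selects
* objects 100 through 200 inclusive, and "100:200:A-f" selects every
* object type in that range except plain files.
*/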
static int
parse_object_range(char *range, zopt_object_range_t *zor, char **msg)
{
uint64_t flags = 0;
char *p, *s, *dup, *flagstr, *tmp = NULL;
size_t len;
int i;
int rc = 0;
if (strchr(range, ':') == NULL) {
zor->zor_obj_start = strtoull(range, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in object ID";
rc = 1;
}
zor->zor_obj_end = zor->zor_obj_start;
return (rc);
}
if (strchr(range, ':') == range) {
*msg = "Invalid leading colon";
rc = 1;
return (rc);
}
len = strlen(range);
if (range[len - 1] == ':') {
*msg = "Invalid trailing colon";
rc = 1;
return (rc);
}
dup = strdup(range);
s = strtok_r(dup, ":", &tmp);
zor->zor_obj_start = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in start object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
zor->zor_obj_end = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in end object ID";
rc = 1;
goto out;
}
if (zor->zor_obj_start > zor->zor_obj_end) {
*msg = "Start object ID may not exceed end object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
if (s == NULL) {
zor->zor_flags = ZOR_FLAG_ALL_TYPES;
goto out;
} else if (strtok_r(NULL, ":", &tmp) != NULL) {
*msg = "Invalid colon-delimited field after flags";
rc = 1;
goto out;
}
flagstr = s;
for (i = 0; flagstr[i]; i++) {
int bit;
boolean_t negation = (flagstr[i] == '-');
if (negation) {
i++;
if (flagstr[i] == '\0') {
*msg = "Invalid trailing negation operator";
rc = 1;
goto out;
}
}
bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
*msg = "Invalid flag";
rc = 1;
goto out;
}
if (negation)
flags &= ~bit;
else
flags |= bit;
}
zor->zor_flags = flags;
out:
free(dup);
return (rc);
}
static void
dump_objset(objset_t *os)
{
dmu_objset_stats_t dds = { 0 };
uint64_t object, object_count;
uint64_t refdbytes, usedobjs, scratch;
char numbuf[32];
char blkbuf[BP_SPRINTF_LEN + 20];
char osname[ZFS_MAX_DATASET_NAME_LEN];
const char *type = "UNKNOWN";
int verbosity = dump_opt['d'];
boolean_t print_header;
unsigned i;
int error;
uint64_t total_slots_used = 0;
uint64_t max_slot_used = 0;
uint64_t dnode_slots;
uint64_t obj_start;
uint64_t obj_end;
uint64_t flags;
/* make sure nicenum has enough space */
CTASSERT(sizeof (numbuf) >= NN_NUMBUF_SZ);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
print_header = B_TRUE;
if (dds.dds_type < DMU_OST_NUMTYPES)
type = objset_types[dds.dds_type];
if (dds.dds_type == DMU_OST_META) {
dds.dds_creation_txg = TXG_INITIAL;
usedobjs = BP_GET_FILL(os->os_rootbp);
refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
dd_used_bytes;
} else {
dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
}
ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
if (verbosity >= 4) {
(void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
(void) snprintf_blkptr(blkbuf + strlen(blkbuf),
sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
} else {
blkbuf[0] = '\0';
}
dmu_objset_name(os, osname);
(void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
"%s, %llu objects%s%s\n",
osname, type, (u_longlong_t)dmu_objset_id(os),
(u_longlong_t)dds.dds_creation_txg,
numbuf, (u_longlong_t)usedobjs, blkbuf,
(dds.dds_inconsistent) ? " (inconsistent)" : "");
for (i = 0; i < zopt_object_args; i++) {
obj_start = zopt_object_ranges[i].zor_obj_start;
obj_end = zopt_object_ranges[i].zor_obj_end;
flags = zopt_object_ranges[i].zor_flags;
object = obj_start;
if (object == 0 || obj_start == obj_end)
dump_object(os, object, verbosity, &print_header, NULL,
flags);
else
object--;
while ((dmu_object_next(os, &object, B_FALSE, 0) == 0) &&
object <= obj_end) {
dump_object(os, object, verbosity, &print_header, NULL,
flags);
}
}
if (zopt_object_args > 0) {
(void) printf("\n");
return;
}
if (dump_opt['i'] != 0 || verbosity >= 2)
dump_intent_log(dmu_objset_zil(os));
if (dmu_objset_ds(os) != NULL) {
dsl_dataset_t *ds = dmu_objset_ds(os);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
dump_blkptr_list(&ds->ds_dir->dd_livelist, "Livelist");
if (verify_dd_livelist(os) != 0)
fatal("livelist is incorrect");
}
if (dsl_dataset_remap_deadlist_exists(ds)) {
(void) printf("ds_remap_deadlist:\n");
dump_blkptr_list(&ds->ds_remap_deadlist, "Deadlist");
}
count_ds_mos_objects(ds);
}
if (dmu_objset_ds(os) != NULL)
dump_bookmarks(os, verbosity);
if (verbosity < 2)
return;
if (BP_IS_HOLE(os->os_rootbp))
return;
dump_object(os, 0, verbosity, &print_header, NULL, 0);
object_count = 0;
if (DMU_USERUSED_DNODE(os) != NULL &&
DMU_USERUSED_DNODE(os)->dn_type != 0) {
dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header,
NULL, 0);
dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header,
NULL, 0);
}
if (DMU_PROJECTUSED_DNODE(os) != NULL &&
DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
&print_header, NULL, 0);
object = 0;
while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
dump_object(os, object, verbosity, &print_header, &dnode_slots,
0);
object_count++;
total_slots_used += dnode_slots;
max_slot_used = object + dnode_slots - 1;
}
(void) printf("\n");
(void) printf(" Dnode slots:\n");
(void) printf("\tTotal used: %10llu\n",
(u_longlong_t)total_slots_used);
(void) printf("\tMax used: %10llu\n",
(u_longlong_t)max_slot_used);
(void) printf("\tPercent empty: %10lf\n",
(double)(max_slot_used - total_slots_used)*100 /
(double)max_slot_used);
(void) printf("\n");
if (error != ESRCH) {
(void) fprintf(stderr, "dmu_object_next() = %d\n", error);
abort();
}
ASSERT3U(object_count, ==, usedobjs);
if (leaked_objects != 0) {
(void) printf("%d potentially leaked objects detected\n",
leaked_objects);
leaked_objects = 0;
}
}
static void
dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
{
time_t timestamp = ub->ub_timestamp;
(void) printf("%s", header ? header : "");
(void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
(void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
(void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
(void) printf("\ttimestamp = %llu UTC = %s",
(u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));
(void) printf("\tmmp_magic = %016llx\n",
(u_longlong_t)ub->ub_mmp_magic);
if (MMP_VALID(ub)) {
(void) printf("\tmmp_delay = %0llu\n",
(u_longlong_t)ub->ub_mmp_delay);
if (MMP_SEQ_VALID(ub))
(void) printf("\tmmp_seq = %u\n",
(unsigned int) MMP_SEQ(ub));
if (MMP_FAIL_INT_VALID(ub))
(void) printf("\tmmp_fail = %u\n",
(unsigned int) MMP_FAIL_INT(ub));
if (MMP_INTERVAL_VALID(ub))
(void) printf("\tmmp_write = %u\n",
(unsigned int) MMP_INTERVAL(ub));
/* After MMP_* to make summarize_uberblock_mmp cleaner */
(void) printf("\tmmp_valid = %x\n",
(unsigned int) ub->ub_mmp_config & 0xFF);
}
if (dump_opt['u'] >= 4) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
(void) printf("\trootbp = %s\n", blkbuf);
}
(void) printf("\tcheckpoint_txg = %llu\n",
(u_longlong_t)ub->ub_checkpoint_txg);
(void) printf("%s", footer ? footer : "");
}
static void
dump_config(spa_t *spa)
{
dmu_buf_t *db;
size_t nvsize = 0;
int error = 0;
error = dmu_bonus_hold(spa->spa_meta_objset,
spa->spa_config_object, FTAG, &db);
if (error == 0) {
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
(void) printf("\nMOS Configuration:\n");
dump_packed_nvlist(spa->spa_meta_objset,
spa->spa_config_object, (void *)&nvsize, 1);
} else {
(void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
(u_longlong_t)spa->spa_config_object, error);
}
}
static void
dump_cachefile(const char *cachefile)
{
int fd;
struct stat64 statbuf;
char *buf;
nvlist_t *config;
if ((fd = open64(cachefile, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if (fstat64(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if ((buf = malloc(statbuf.st_size)) == NULL) {
(void) fprintf(stderr, "failed to allocate %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) fprintf(stderr, "failed to read %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
(void) fprintf(stderr, "failed to unpack nvlist\n");
exit(1);
}
free(buf);
dump_nvlist(config, 0);
nvlist_free(config);
}
/*
* ZFS label nvlist stats
*/
typedef struct zdb_nvl_stats {
int zns_list_count;
int zns_leaf_count;
size_t zns_leaf_largest;
size_t zns_leaf_total;
nvlist_t *zns_string;
nvlist_t *zns_uint64;
nvlist_t *zns_boolean;
} zdb_nvl_stats_t;
static void
collect_nvlist_stats(nvlist_t *nvl, zdb_nvl_stats_t *stats)
{
nvlist_t *list, **array;
nvpair_t *nvp = NULL;
char *name;
uint_t i, items;
stats->zns_list_count++;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
name = nvpair_name(nvp);
switch (nvpair_type(nvp)) {
case DATA_TYPE_STRING:
fnvlist_add_string(stats->zns_string, name,
fnvpair_value_string(nvp));
break;
case DATA_TYPE_UINT64:
fnvlist_add_uint64(stats->zns_uint64, name,
fnvpair_value_uint64(nvp));
break;
case DATA_TYPE_BOOLEAN:
fnvlist_add_boolean(stats->zns_boolean, name);
break;
case DATA_TYPE_NVLIST:
if (nvpair_value_nvlist(nvp, &list) == 0)
collect_nvlist_stats(list, stats);
break;
case DATA_TYPE_NVLIST_ARRAY:
if (nvpair_value_nvlist_array(nvp, &array, &items) != 0)
break;
for (i = 0; i < items; i++) {
collect_nvlist_stats(array[i], stats);
/* collect stats on leaf vdev */
if (strcmp(name, "children") == 0) {
size_t size;
(void) nvlist_size(array[i], &size,
NV_ENCODE_XDR);
stats->zns_leaf_total += size;
if (size > stats->zns_leaf_largest)
stats->zns_leaf_largest = size;
stats->zns_leaf_count++;
}
}
break;
default:
(void) printf("skip type %d!\n", (int)nvpair_type(nvp));
}
}
}
static void
dump_nvlist_stats(nvlist_t *nvl, size_t cap)
{
zdb_nvl_stats_t stats = { 0 };
size_t size, sum = 0, total;
size_t noise;
/* requires nvlist with non-unique names for stat collection */
VERIFY0(nvlist_alloc(&stats.zns_string, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_uint64, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_boolean, 0, 0));
VERIFY0(nvlist_size(stats.zns_boolean, &noise, NV_ENCODE_XDR));
(void) printf("\n\nZFS Label NVList Config Stats:\n");
VERIFY0(nvlist_size(nvl, &total, NV_ENCODE_XDR));
(void) printf(" %d bytes used, %d bytes free (using %4.1f%%)\n\n",
(int)total, (int)(cap - total), 100.0 * total / cap);
collect_nvlist_stats(nvl, &stats);
VERIFY0(nvlist_size(stats.zns_uint64, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "integers:",
(int)fnvlist_num_pairs(stats.zns_uint64),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_string, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "strings:",
(int)fnvlist_num_pairs(stats.zns_string),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_boolean, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "booleans:",
(int)fnvlist_num_pairs(stats.zns_boolean),
(int)size, 100.0 * size / total);
size = total - sum; /* treat remainder as nvlist overhead */
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n\n", "nvlists:",
stats.zns_list_count, (int)size, 100.0 * size / total);
if (stats.zns_leaf_count > 0) {
size_t average = stats.zns_leaf_total / stats.zns_leaf_count;
(void) printf("%12s %4d %6d bytes average\n", "leaf vdevs:",
stats.zns_leaf_count, (int)average);
(void) printf("%24d bytes largest\n",
(int)stats.zns_leaf_largest);
if (dump_opt['l'] >= 3 && average > 0)
(void) printf(" space for %d additional leaf vdevs\n",
(int)((cap - total) / average));
}
(void) printf("\n");
nvlist_free(stats.zns_string);
nvlist_free(stats.zns_uint64);
nvlist_free(stats.zns_boolean);
}
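/*
 * A brief orientation comment (grounded in the code below): a
 * cksum_record_t records one unique checksum seen while scanning the
 * device labels, along with which of the VDEV_LABELS labels carried
 * data with that checksum. Records live in AVL trees (one for label
 * configs, one for uberblocks) so identical copies across labels are
 * reported once and annotated with the label numbers that contain
 * them (see cksum_record_insert() and print_label_numbers()).
 */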
typedef struct cksum_record {
zio_cksum_t cksum;
boolean_t labels[VDEV_LABELS];
avl_node_t link;
} cksum_record_t;
static int
cksum_record_compare(const void *x1, const void *x2)
{
const cksum_record_t *l = (cksum_record_t *)x1;
const cksum_record_t *r = (cksum_record_t *)x2;
int arraysize = ARRAY_SIZE(l->cksum.zc_word);
int difference;
for (int i = 0; i < arraysize; i++) {
difference = TREE_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]);
if (difference)
break;
}
return (difference);
}
static cksum_record_t *
cksum_record_alloc(zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL);
rec->cksum = *cksum;
rec->labels[l] = B_TRUE;
return (rec);
}
static cksum_record_t *
cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum)
{
cksum_record_t lookup = { .cksum = *cksum };
avl_index_t where;
return (avl_find(tree, &lookup, &where));
}
static cksum_record_t *
cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = cksum_record_lookup(tree, cksum);
if (rec) {
rec->labels[l] = B_TRUE;
} else {
rec = cksum_record_alloc(cksum, l);
avl_add(tree, rec);
}
return (rec);
}
static int
first_label(cksum_record_t *rec)
{
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i])
return (i);
return (-1);
}
static void
print_label_numbers(char *prefix, cksum_record_t *rec)
{
printf("%s", prefix);
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i] == B_TRUE)
printf("%d ", i);
printf("\n");
}
#define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT)
typedef struct zdb_label {
vdev_label_t label;
nvlist_t *config_nv;
cksum_record_t *config;
cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT];
boolean_t header_printed;
boolean_t read_failed;
} zdb_label_t;
static void
print_label_header(zdb_label_t *label, int l)
{
if (dump_opt['q'])
return;
if (label->header_printed == B_TRUE)
return;
(void) printf("------------------------------------\n");
(void) printf("LABEL %d\n", l);
(void) printf("------------------------------------\n");
label->header_printed = B_TRUE;
}
static void
print_l2arc_header(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device header\n");
(void) printf("------------------------------------\n");
}
static void
print_l2arc_log_blocks(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device log blocks\n");
(void) printf("------------------------------------\n");
}
static void
dump_l2arc_log_entries(uint64_t log_entries,
l2arc_log_ent_phys_t *le, uint64_t i)
{
for (int j = 0; j < log_entries; j++) {
dva_t dva = le[j].le_dva;
(void) printf("lb[%4llu]\tle[%4d]\tDVA asize: %llu, "
"vdev: %llu, offset: %llu\n",
(u_longlong_t)i, j + 1,
(u_longlong_t)DVA_GET_ASIZE(&dva),
(u_longlong_t)DVA_GET_VDEV(&dva),
(u_longlong_t)DVA_GET_OFFSET(&dva));
(void) printf("|\t\t\t\tbirth: %llu\n",
(u_longlong_t)le[j].le_birth);
(void) printf("|\t\t\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tpsize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tcompr: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS((&le[j])->le_prop));
(void) printf("|\t\t\t\tcomplevel: %llu\n",
(u_longlong_t)(&le[j])->le_complevel);
(void) printf("|\t\t\t\ttype: %llu\n",
(u_longlong_t)L2BLK_GET_TYPE((&le[j])->le_prop));
(void) printf("|\t\t\t\tprotected: %llu\n",
(u_longlong_t)L2BLK_GET_PROTECTED((&le[j])->le_prop));
(void) printf("|\t\t\t\tprefetch: %llu\n",
(u_longlong_t)L2BLK_GET_PREFETCH((&le[j])->le_prop));
(void) printf("|\t\t\t\taddress: %llu\n",
(u_longlong_t)le[j].le_daddr);
(void) printf("|\t\t\t\tARC state: %llu\n",
(u_longlong_t)L2BLK_GET_STATE((&le[j])->le_prop));
(void) printf("|\n");
}
(void) printf("\n");
}
static void
dump_l2arc_log_blkptr(l2arc_log_blkptr_t lbps)
{
(void) printf("|\t\tdaddr: %llu\n", (u_longlong_t)lbps.lbp_daddr);
(void) printf("|\t\tpayload_asize: %llu\n",
(u_longlong_t)lbps.lbp_payload_asize);
(void) printf("|\t\tpayload_start: %llu\n",
(u_longlong_t)lbps.lbp_payload_start);
(void) printf("|\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE((&lbps)->lbp_prop));
(void) printf("|\t\tasize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE((&lbps)->lbp_prop));
(void) printf("|\t\tcompralgo: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS((&lbps)->lbp_prop));
(void) printf("|\t\tcksumalgo: %llu\n",
(u_longlong_t)L2BLK_GET_CHECKSUM((&lbps)->lbp_prop));
(void) printf("|\n\n");
}
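/*
 * Walk the chain of L2ARC log blocks: start from the two block
 * pointers recorded in the device header (dh_start_lbps) and follow
 * each block's lb_prev_lbp backwards, verifying the checksum of every
 * block read and decompressing it when needed. The rebuild argument
 * accumulates the count and aligned size of the valid blocks found,
 * mirroring what l2arc_rebuild() would reconstruct, but without its
 * memory pressure valve (see the comment in dump_l2arc_header()).
 */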
static void
dump_l2arc_log_blocks(int fd, l2arc_dev_hdr_phys_t l2dhdr,
l2arc_dev_hdr_phys_t *rebuild)
{
l2arc_log_blk_phys_t this_lb;
uint64_t asize;
l2arc_log_blkptr_t lbps[2];
abd_t *abd;
zio_cksum_t cksum;
int failed = 0;
l2arc_dev_t dev;
if (!dump_opt['q'])
print_l2arc_log_blocks();
bcopy((&l2dhdr)->dh_start_lbps, lbps, sizeof (lbps));
dev.l2ad_evict = l2dhdr.dh_evict;
dev.l2ad_start = l2dhdr.dh_start;
dev.l2ad_end = l2dhdr.dh_end;
if (l2dhdr.dh_start_lbps[0].lbp_daddr == 0) {
/* no log blocks to read */
if (!dump_opt['q']) {
(void) printf("No log blocks to read\n");
(void) printf("\n");
}
return;
} else {
dev.l2ad_hand = lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
}
dev.l2ad_first = !!(l2dhdr.dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
for (;;) {
if (!l2arc_log_blkptr_valid(&dev, &lbps[0]))
break;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
if (pread64(fd, &this_lb, asize, lbps[0].lbp_daddr) != asize) {
if (!dump_opt['q']) {
(void) printf("Error while reading next log "
"block\n\n");
}
break;
}
fletcher_4_native_varsize(&this_lb, asize, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, lbps[0].lbp_cksum)) {
failed++;
if (!dump_opt['q']) {
(void) printf("Invalid cksum\n");
dump_l2arc_log_blkptr(lbps[0]);
}
break;
}
switch (L2BLK_GET_COMPRESS((&lbps[0])->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
default:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, &this_lb, 0, asize);
zio_decompress_data(L2BLK_GET_COMPRESS(
(&lbps[0])->lbp_prop), abd, &this_lb,
asize, sizeof (this_lb), NULL);
abd_free(abd);
break;
}
if (this_lb.lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(&this_lb, sizeof (this_lb));
if (this_lb.lb_magic != L2ARC_LOG_BLK_MAGIC) {
if (!dump_opt['q'])
(void) printf("Invalid log block magic\n\n");
break;
}
rebuild->dh_lb_count++;
rebuild->dh_lb_asize += asize;
if (dump_opt['l'] > 1 && !dump_opt['q']) {
(void) printf("lb[%4llu]\tmagic: %llu\n",
(u_longlong_t)rebuild->dh_lb_count,
(u_longlong_t)this_lb.lb_magic);
dump_l2arc_log_blkptr(lbps[0]);
}
if (dump_opt['l'] > 2 && !dump_opt['q'])
dump_l2arc_log_entries(l2dhdr.dh_log_entries,
this_lb.lb_entries,
rebuild->dh_lb_count);
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev.l2ad_evict) &&
!dev.l2ad_first)
break;
lbps[0] = lbps[1];
lbps[1] = this_lb.lb_prev_lbp;
}
if (!dump_opt['q']) {
(void) printf("log_blk_count:\t %llu with valid cksum\n",
(u_longlong_t)rebuild->dh_lb_count);
(void) printf("\t\t %d with invalid cksum\n", failed);
(void) printf("log_blk_asize:\t %llu\n\n",
(u_longlong_t)rebuild->dh_lb_asize);
}
}
static int
dump_l2arc_header(int fd)
{
l2arc_dev_hdr_phys_t l2dhdr, rebuild;
int error = B_FALSE;
bzero(&l2dhdr, sizeof (l2dhdr));
bzero(&rebuild, sizeof (rebuild));
if (pread64(fd, &l2dhdr, sizeof (l2dhdr),
VDEV_LABEL_START_SIZE) != sizeof (l2dhdr)) {
error = B_TRUE;
} else {
if (l2dhdr.dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(&l2dhdr, sizeof (l2dhdr));
if (l2dhdr.dh_magic != L2ARC_DEV_HDR_MAGIC)
error = B_TRUE;
}
if (error) {
(void) printf("L2ARC device header not found\n\n");
/* Do not return an error here for backward compatibility */
return (0);
} else if (!dump_opt['q']) {
print_l2arc_header();
(void) printf(" magic: %llu\n",
(u_longlong_t)l2dhdr.dh_magic);
(void) printf(" version: %llu\n",
(u_longlong_t)l2dhdr.dh_version);
(void) printf(" pool_guid: %llu\n",
(u_longlong_t)l2dhdr.dh_spa_guid);
(void) printf(" flags: %llu\n",
(u_longlong_t)l2dhdr.dh_flags);
(void) printf(" start_lbps[0]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[0].lbp_daddr);
(void) printf(" start_lbps[1]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[1].lbp_daddr);
(void) printf(" log_blk_ent: %llu\n",
(u_longlong_t)l2dhdr.dh_log_entries);
(void) printf(" start: %llu\n",
(u_longlong_t)l2dhdr.dh_start);
(void) printf(" end: %llu\n",
(u_longlong_t)l2dhdr.dh_end);
(void) printf(" evict: %llu\n",
(u_longlong_t)l2dhdr.dh_evict);
(void) printf(" lb_asize_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_asize);
(void) printf(" lb_count_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_count);
(void) printf(" trim_action_time: %llu\n",
(u_longlong_t)l2dhdr.dh_trim_action_time);
(void) printf(" trim_state: %llu\n\n",
(u_longlong_t)l2dhdr.dh_trim_state);
}
dump_l2arc_log_blocks(fd, l2dhdr, &rebuild);
/*
* The total aligned size of log blocks and the number of log blocks
* reported in the header of the device may be less than what zdb
* reports via dump_l2arc_log_blocks(), which emulates l2arc_rebuild().
* This happens because dump_l2arc_log_blocks() lacks the memory
* pressure valve that l2arc_rebuild() has. Thus, on a system with low
* memory, l2arc_rebuild() may exit prematurely and record dh_lb_asize
* and dh_lb_count values lower than what actually exists on the
* device. This is normal and zdb should not exit with an error. The
* opposite case should never happen: the values reported in the
* header should never be higher than what dump_l2arc_log_blocks() and
* l2arc_rebuild() report. If they are, there is a leak in the
* accounting of log blocks.
*/
if (l2dhdr.dh_lb_asize > rebuild.dh_lb_asize ||
l2dhdr.dh_lb_count > rebuild.dh_lb_count)
return (1);
return (0);
}
static void
dump_config_from_label(zdb_label_t *label, size_t buflen, int l)
{
if (dump_opt['q'])
return;
if ((dump_opt['l'] < 3) && (first_label(label->config) != l))
return;
print_label_header(label, l);
dump_nvlist(label->config_nv, 4);
print_label_numbers(" labels = ", label->config);
if (dump_opt['l'] >= 2)
dump_nvlist_stats(label->config_nv, buflen);
}
#define ZDB_MAX_UB_HEADER_SIZE 32
static void
dump_label_uberblocks(zdb_label_t *label, uint64_t ashift, int label_num)
{
vdev_t vd;
char header[ZDB_MAX_UB_HEADER_SIZE];
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)&label->label + uoff);
cksum_record_t *rec = label->uberblocks[i];
if (rec == NULL) {
if (dump_opt['u'] >= 2) {
print_label_header(label, label_num);
(void) printf(" Uberblock[%d] invalid\n", i);
}
continue;
}
if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
continue;
if ((dump_opt['u'] < 4) &&
(ub->ub_mmp_magic == MMP_MAGIC) && ub->ub_mmp_delay &&
(i >= VDEV_UBERBLOCK_COUNT(&vd) - MMP_BLOCKS_PER_LABEL))
continue;
print_label_header(label, label_num);
(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
" Uberblock[%d]\n", i);
dump_uberblock(ub, header, "");
print_label_numbers(" labels = ", rec);
}
}
static char curpath[PATH_MAX];
/*
* Iterate through the path components, recursively passing
* current one's obj and remaining path until we find the obj
* for the last one.
*/
static int
dump_path_impl(objset_t *os, uint64_t obj, char *name, uint64_t *retobj)
{
int err;
boolean_t header = B_TRUE;
uint64_t child_obj;
char *s;
dmu_buf_t *db;
dmu_object_info_t doi;
if ((s = strchr(name, '/')) != NULL)
*s = '\0';
err = zap_lookup(os, obj, name, 8, 1, &child_obj);
(void) strlcat(curpath, name, sizeof (curpath));
if (err != 0) {
(void) fprintf(stderr, "failed to lookup %s: %s\n",
curpath, strerror(err));
return (err);
}
child_obj = ZFS_DIRENT_OBJ(child_obj);
err = sa_buf_hold(os, child_obj, FTAG, &db);
if (err != 0) {
(void) fprintf(stderr,
"failed to get SA dbuf for obj %llu: %s\n",
(u_longlong_t)child_obj, strerror(err));
return (EINVAL);
}
dmu_object_info_from_db(db, &doi);
sa_buf_rele(db, FTAG);
if (doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) {
(void) fprintf(stderr, "invalid bonus type %d for obj %llu\n",
doi.doi_bonus_type, (u_longlong_t)child_obj);
return (EINVAL);
}
if (dump_opt['v'] > 6) {
(void) printf("obj=%llu %s type=%d bonustype=%d\n",
(u_longlong_t)child_obj, curpath, doi.doi_type,
doi.doi_bonus_type);
}
(void) strlcat(curpath, "/", sizeof (curpath));
switch (doi.doi_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (s != NULL && *(s + 1) != '\0')
return (dump_path_impl(os, child_obj, s + 1, retobj));
- /*FALLTHROUGH*/
+ /* FALLTHROUGH */
case DMU_OT_PLAIN_FILE_CONTENTS:
if (retobj != NULL) {
*retobj = child_obj;
} else {
dump_object(os, child_obj, dump_opt['v'], &header,
NULL, 0);
}
return (0);
default:
(void) fprintf(stderr, "object %llu has non-file/directory "
"type %d\n", (u_longlong_t)obj, doi.doi_type);
break;
}
return (EINVAL);
}
/*
* Dump the blocks for the object specified by path inside the dataset.
*/
static int
dump_path(char *ds, char *path, uint64_t *retobj)
{
int err;
objset_t *os;
uint64_t root_obj;
err = open_objset(ds, FTAG, &os);
if (err != 0)
return (err);
err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
if (err != 0) {
(void) fprintf(stderr, "can't lookup root znode: %s\n",
strerror(err));
close_objset(os, FTAG);
return (EINVAL);
}
(void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
err = dump_path_impl(os, root_obj, path, retobj);
close_objset(os, FTAG);
return (err);
}
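/*
 * Copy the contents of the file object srcobj into destfile, reading
 * the object in chunks of at most 1 MiB via dmu_read() and writing
 * them out with write(2). The object's logical size is taken from its
 * ZPL_SIZE system attribute.
 */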
static int
zdb_copy_object(objset_t *os, uint64_t srcobj, char *destfile)
{
int err = 0;
uint64_t size, readsize, oursize, offset;
ssize_t writesize;
sa_handle_t *hdl;
(void) printf("Copying object %" PRIu64 " to file %s\n", srcobj,
destfile);
VERIFY3P(os, ==, sa_os);
if ((err = sa_handle_get(os, srcobj, NULL, SA_HDL_PRIVATE, &hdl))) {
(void) printf("Failed to get handle for SA znode\n");
return (err);
}
if ((err = sa_lookup(hdl, sa_attr_table[ZPL_SIZE], &size, 8))) {
(void) sa_handle_destroy(hdl);
return (err);
}
(void) sa_handle_destroy(hdl);
(void) printf("Object %" PRIu64 " is %" PRIu64 " bytes\n", srcobj,
size);
if (size == 0) {
return (EINVAL);
}
int fd = open(destfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (fd == -1) {
err = errno;
(void) fprintf(stderr, "failed to create %s: %s\n",
destfile, strerror(err));
return (err);
}
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
offset = 0;
char *buf = kmem_alloc(oursize, KM_NOSLEEP);
if (buf == NULL) {
(void) close(fd);
return (ENOMEM);
}
while (offset < size) {
readsize = MIN(size - offset, 1 << 20);
err = dmu_read(os, srcobj, offset, readsize, buf, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(buf, oursize);
return (err);
}
if (dump_opt['v'] > 3) {
(void) printf("Read offset=%" PRIu64 " size=%" PRIu64
" error=%d\n", offset, readsize, err);
}
writesize = write(fd, buf, readsize);
if (writesize < 0) {
err = errno;
break;
} else if (writesize != readsize) {
/* Incomplete write */
(void) fprintf(stderr, "Short write, only wrote %llu of"
" %" PRIu64 " bytes, exiting...\n",
(u_longlong_t)writesize, readsize);
break;
}
offset += readsize;
}
(void) close(fd);
if (buf != NULL)
kmem_free(buf, oursize);
return (err);
}
static int
dump_label(const char *dev)
{
char path[MAXPATHLEN];
zdb_label_t labels[VDEV_LABELS];
uint64_t psize, ashift, l2cache;
struct stat64 statbuf;
boolean_t config_found = B_FALSE;
boolean_t error = B_FALSE;
boolean_t read_l2arc_header = B_FALSE;
avl_tree_t config_tree;
avl_tree_t uberblock_tree;
void *node, *cookie;
int fd;
bzero(labels, sizeof (labels));
/*
* Check if we were given an absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(path, dev, sizeof (path));
if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
int error;
error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(path)) {
if (zfs_append_partition(path, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat64(path, &statbuf) != 0)) {
(void) printf("failed to find device %s, try "
"specifying absolute path instead\n", dev);
return (1);
}
}
if ((fd = open64(path, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", path, strerror(errno));
exit(1);
}
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
(void) close(fd);
exit(1);
}
if (S_ISBLK(statbuf.st_mode) && zfs_dev_flush(fd) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
psize = statbuf.st_size;
psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
ashift = SPA_MINBLOCKSHIFT;
/*
* 1. Read the label from disk
* 2. Unpack the configuration and insert in config tree.
* 3. Traverse all uberblocks and insert in uberblock tree.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
char *buf = label->label.vl_vdev_phys.vp_nvlist;
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
nvlist_t *config;
cksum_record_t *rec;
zio_cksum_t cksum;
vdev_t vd;
if (pread64(fd, &label->label, sizeof (label->label),
vdev_label_offset(psize, l, 0)) != sizeof (label->label)) {
if (!dump_opt['q'])
(void) printf("failed to read label %d\n", l);
label->read_failed = B_TRUE;
error = B_TRUE;
continue;
}
label->read_failed = B_FALSE;
if (nvlist_unpack(buf, buflen, &config, 0) == 0) {
nvlist_t *vdev_tree = NULL;
size_t size;
if ((nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
(nvlist_lookup_uint64(vdev_tree,
ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
ashift = SPA_MINBLOCKSHIFT;
if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0)
size = buflen;
/* If the device is a cache device, remember to dump its L2ARC header. */
if (!read_l2arc_header) {
if (nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
l2cache == POOL_STATE_L2CACHE) {
read_l2arc_header = B_TRUE;
}
}
fletcher_4_native_varsize(buf, size, &cksum);
rec = cksum_record_insert(&config_tree, &cksum, l);
label->config = rec;
label->config_nv = config;
config_found = B_TRUE;
} else {
error = B_TRUE;
}
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)label + uoff);
if (uberblock_verify(ub))
continue;
fletcher_4_native_varsize(ub, sizeof (*ub), &cksum);
rec = cksum_record_insert(&uberblock_tree, &cksum, l);
label->uberblocks[i] = rec;
}
}
/*
* Dump the label and uberblocks.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
if (label->read_failed == B_TRUE)
continue;
if (label->config_nv) {
dump_config_from_label(label, buflen, l);
} else {
if (!dump_opt['q'])
(void) printf("failed to unpack label %d\n", l);
}
if (dump_opt['u'])
dump_label_uberblocks(label, ashift, l);
nvlist_free(label->config_nv);
}
/*
* Dump the L2ARC header, if existent.
*/
if (read_l2arc_header)
error |= dump_l2arc_header(fd);
cookie = NULL;
while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
cookie = NULL;
while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
avl_destroy(&config_tree);
avl_destroy(&uberblock_tree);
(void) close(fd);
return (config_found == B_FALSE ? 2 :
(error == B_TRUE ? 1 : 0));
}
static uint64_t dataset_feature_count[SPA_FEATURES];
static uint64_t global_feature_count[SPA_FEATURES];
static uint64_t remap_deadlist_count = 0;
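/*
 * Per-dataset callback: tallies feature activity for this dataset
 * (per-dataset feature flags, redaction and written bookmarks,
 * livelists, remap deadlists) into the counters above, then dumps the
 * objset itself.
 */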
/*ARGSUSED*/
static int
dump_one_objset(const char *dsname, void *arg)
{
int error;
objset_t *os;
spa_feature_t f;
error = open_objset(dsname, FTAG, &os);
if (error != 0)
return (0);
for (f = 0; f < SPA_FEATURES; f++) {
if (!dsl_dataset_feature_is_active(dmu_objset_ds(os), f))
continue;
ASSERT(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET);
dataset_feature_count[f]++;
}
if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
remap_deadlist_count++;
}
for (dsl_bookmark_node_t *dbn =
avl_first(&dmu_objset_ds(os)->ds_bookmarks); dbn != NULL;
dbn = AVL_NEXT(&dmu_objset_ds(os)->ds_bookmarks, dbn)) {
mos_obj_refd(dbn->dbn_phys.zbm_redaction_obj);
if (dbn->dbn_phys.zbm_redaction_obj != 0)
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS]++;
if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN]++;
}
if (dsl_deadlist_is_open(&dmu_objset_ds(os)->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
global_feature_count[SPA_FEATURE_LIVELIST]++;
}
dump_objset(os);
close_objset(os, FTAG);
fuid_table_destroy();
return (0);
}
/*
* Block statistics.
*/
#define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
typedef struct zdb_blkstats {
uint64_t zb_asize;
uint64_t zb_lsize;
uint64_t zb_psize;
uint64_t zb_count;
uint64_t zb_gangs;
uint64_t zb_ditto_samevdev;
uint64_t zb_ditto_same_ms;
uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
} zdb_blkstats_t;
/*
* Extended object types to report deferred frees and dedup auto-ditto blocks.
*/
#define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
#define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
#define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
#define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
static const char *zdb_ot_extname[] = {
"deferred free",
"dedup ditto",
"other",
"Total",
};
#define ZB_TOTAL DN_MAX_LEVELS
#define SPA_MAX_FOR_16M (SPA_MAXBLOCKSHIFT+1)
typedef struct zdb_cb {
zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
uint64_t zcb_removing_size;
uint64_t zcb_checkpoint_size;
uint64_t zcb_dedup_asize;
uint64_t zcb_dedup_blocks;
uint64_t zcb_psize_count[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_count[SPA_MAX_FOR_16M];
uint64_t zcb_asize_count[SPA_MAX_FOR_16M];
uint64_t zcb_psize_len[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_len[SPA_MAX_FOR_16M];
uint64_t zcb_asize_len[SPA_MAX_FOR_16M];
uint64_t zcb_psize_total;
uint64_t zcb_lsize_total;
uint64_t zcb_asize_total;
uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
[BPE_PAYLOAD_SIZE + 1];
uint64_t zcb_start;
hrtime_t zcb_lastprint;
uint64_t zcb_totalasize;
uint64_t zcb_errors[256];
int zcb_readfails;
int zcb_haderrors;
spa_t *zcb_spa;
uint32_t **zcb_vd_obsolete_counts;
} zdb_cb_t;
/* test if two DVA offsets from same vdev are within the same metaslab */
static boolean_t
same_metaslab(spa_t *spa, uint64_t vdev, uint64_t off1, uint64_t off2)
{
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t ms_shift = vd->vdev_ms_shift;
return ((off1 >> ms_shift) == (off2 >> ms_shift));
}
/*
* Used to simplify reporting of the histogram data.
*/
typedef struct one_histo {
char *name;
uint64_t *count;
uint64_t *len;
uint64_t cumulative;
} one_histo_t;
/*
* The number of separate histograms processed for psize, lsize and asize.
*/
#define NUM_HISTO 3
/*
* This routine produces a fixed-column-width report of three
* histograms, one row per power-of-two block size from 512 bytes
* (SPA_MINBLOCKSHIFT) up to 16M (SPA_MAXBLOCKSHIFT), showing the
* count, length and cumulative length of the psize, lsize and asize
* blocks.
*
* All three types of blocks are listed on a single line.
*
* By default the table is printed in nicenumber format (e.g. 123K),
* but if the '-P' parameter is specified then the full raw number
* (parseable) is printed out.
*/
static void
dump_size_histograms(zdb_cb_t *zcb)
{
/*
* A temporary buffer that allows us to convert a number into
* a string using zdb_nicenumber to allow either raw or human
* readable numbers to be output.
*/
char numbuf[32];
/*
* Define titles which are used in the headers of the tables
* printed by this routine.
*/
const char blocksize_title1[] = "block";
const char blocksize_title2[] = "size";
const char count_title[] = "Count";
const char length_title[] = "Size";
const char cumulative_title[] = "Cum.";
/*
* Setup the histogram arrays (psize, lsize, and asize).
*/
one_histo_t parm_histo[NUM_HISTO];
parm_histo[0].name = "psize";
parm_histo[0].count = zcb->zcb_psize_count;
parm_histo[0].len = zcb->zcb_psize_len;
parm_histo[0].cumulative = 0;
parm_histo[1].name = "lsize";
parm_histo[1].count = zcb->zcb_lsize_count;
parm_histo[1].len = zcb->zcb_lsize_len;
parm_histo[1].cumulative = 0;
parm_histo[2].name = "asize";
parm_histo[2].count = zcb->zcb_asize_count;
parm_histo[2].len = zcb->zcb_asize_len;
parm_histo[2].cumulative = 0;
(void) printf("\nBlock Size Histogram\n");
/*
* Print the first line titles
*/
if (dump_opt['P'])
(void) printf("\n%s\t", blocksize_title1);
else
(void) printf("\n%7s ", blocksize_title1);
for (int j = 0; j < NUM_HISTO; j++) {
if (dump_opt['P']) {
if (j < NUM_HISTO - 1) {
(void) printf("%s\t\t\t", parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf(" %s", parm_histo[j].name);
}
} else {
if (j < NUM_HISTO - 1) {
/* Left aligned strings in the output */
(void) printf("%-7s ",
parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf("%s", parm_histo[j].name);
}
}
}
(void) printf("\n");
/*
* Print the second line titles
*/
if (dump_opt['P']) {
(void) printf("%s\t", blocksize_title2);
} else {
(void) printf("%7s ", blocksize_title2);
}
for (int i = 0; i < NUM_HISTO; i++) {
if (dump_opt['P']) {
(void) printf("%s\t%s\t%s\t",
count_title, length_title, cumulative_title);
} else {
(void) printf("%7s%7s%7s",
count_title, length_title, cumulative_title);
}
}
(void) printf("\n");
/*
* Print the rows
*/
for (int i = SPA_MINBLOCKSHIFT; i < SPA_MAX_FOR_16M; i++) {
/*
* Print the first column showing the blocksize
*/
zdb_nicenum((1ULL << i), numbuf, sizeof (numbuf));
if (dump_opt['P']) {
printf("%s", numbuf);
} else {
printf("%7s:", numbuf);
}
/*
* Print the remaining set of 3 columns per size:
* for psize, lsize and asize
*/
for (int j = 0; j < NUM_HISTO; j++) {
parm_histo[j].cumulative += parm_histo[j].len[i];
zdb_nicenum(parm_histo[j].count[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].len[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].cumulative,
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
}
(void) printf("\n");
}
}
static void
zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
dmu_object_type_t type)
{
uint64_t refcnt = 0;
int i;
ASSERT(type < ZDB_OT_TOTAL);
if (zilog && zil_bp_tree_add(zilog, bp) != 0)
return;
spa_config_enter(zcb->zcb_spa, SCL_CONFIG, FTAG, RW_READER);
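/*
 * Each block is charged to four buckets: [level][type],
 * [level][ZDB_OT_TOTAL], [ZB_TOTAL][type] and
 * [ZB_TOTAL][ZDB_OT_TOTAL]. The loop below encodes this: i < 2
 * selects the block's own level (otherwise ZB_TOTAL), and i & 1
 * selects its own type (otherwise ZDB_OT_TOTAL).
 */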
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
int t = (i & 1) ? type : ZDB_OT_TOTAL;
int equal;
zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_count++;
/*
* The histogram is only big enough to record blocks up to
* SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
* "other", bucket.
*/
unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
zb->zb_psize_histogram[idx]++;
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) {
zb->zb_ditto_samevdev++;
if (same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
}
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal != 0) {
zb->zb_ditto_samevdev++;
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
}
break;
}
}
spa_config_exit(zcb->zcb_spa, SCL_CONFIG, FTAG);
if (BP_IS_EMBEDDED(bp)) {
zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
[BPE_GET_PSIZE(bp)]++;
return;
}
/*
* The binning histogram bins by powers of two up to
* SPA_MAXBLOCKSIZE rather than creating bins for
* every possible blocksize found in the pool.
*/
int bin = highbit64(BP_GET_PSIZE(bp)) - 1;
zcb->zcb_psize_count[bin]++;
zcb->zcb_psize_len[bin] += BP_GET_PSIZE(bp);
zcb->zcb_psize_total += BP_GET_PSIZE(bp);
bin = highbit64(BP_GET_LSIZE(bp)) - 1;
zcb->zcb_lsize_count[bin]++;
zcb->zcb_lsize_len[bin] += BP_GET_LSIZE(bp);
zcb->zcb_lsize_total += BP_GET_LSIZE(bp);
bin = highbit64(BP_GET_ASIZE(bp)) - 1;
zcb->zcb_asize_count[bin]++;
zcb->zcb_asize_len[bin] += BP_GET_ASIZE(bp);
zcb->zcb_asize_total += BP_GET_ASIZE(bp);
if (dump_opt['L'])
return;
if (BP_GET_DEDUP(bp)) {
ddt_t *ddt;
ddt_entry_t *dde;
ddt = ddt_select(zcb->zcb_spa, bp);
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_FALSE);
if (dde == NULL) {
refcnt = 0;
} else {
ddt_phys_t *ddp = ddt_phys_select(dde, bp);
ddt_phys_decref(ddp);
refcnt = ddp->ddp_refcnt;
if (ddt_phys_total_refcnt(dde) == 0)
ddt_remove(ddt, dde);
}
ddt_exit(ddt);
}
VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
refcnt ? 0 : spa_min_claim_txg(zcb->zcb_spa),
bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
}
static void
zdb_blkptr_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
int ioerr = zio->io_error;
zdb_cb_t *zcb = zio->io_private;
zbookmark_phys_t *zb = &zio->io_bookmark;
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
char blkbuf[BP_SPRINTF_LEN];
zcb->zcb_haderrors = 1;
zcb->zcb_errors[ioerr]++;
if (dump_opt['b'] >= 2)
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
else
blkbuf[0] = '\0';
(void) printf("zdb_blkptr_cb: "
"Got error %d reading "
"<%llu, %llu, %lld, %llx> %s -- skipping\n",
ioerr,
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level,
(u_longlong_t)zb->zb_blkid,
blkbuf);
}
mutex_exit(&spa->spa_scrub_lock);
abd_free(zio->io_abd);
}
static int
zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zdb_cb_t *zcb = arg;
dmu_object_type_t type;
boolean_t is_metadata;
if (zb->zb_level == ZB_DNODE_LEVEL)
return (0);
if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("objset %llu object %llu "
"level %lld offset 0x%llx %s\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(u_longlong_t)blkid2offset(dnp, bp, zb),
blkbuf);
}
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return (0);
type = BP_GET_TYPE(bp);
zdb_count_block(zcb, zilog, bp,
(type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
if (!BP_IS_EMBEDDED(bp) &&
(dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
size_t size = BP_GET_PSIZE(bp);
abd_t *abd = abd_alloc(size, B_FALSE);
int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
flags |= ZIO_FLAG_SPECULATIVE;
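/*
 * Throttle the amount of verification I/O in flight: block until
 * the outstanding byte count drops below max_inflight_bytes. The
 * matching decrement and cv_broadcast() happen in
 * zdb_blkptr_done().
 */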
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes > max_inflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(NULL, spa, bp, abd, size,
zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
}
zcb->zcb_readfails = 0;
/* only call gethrtime() every 100 blocks */
static int iters;
if (++iters > 100)
iters = 0;
else
return (0);
if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
uint64_t now = gethrtime();
char buf[10];
uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
int kb_per_sec =
1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
int sec_remaining =
(zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
/* make sure nicenum has enough space */
CTASSERT(sizeof (buf) >= NN_NUMBUF_SZ);
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"\r%5s completed (%4dMB/s) "
"estimated time remaining: %uhr %02umin %02usec ",
buf, kb_per_sec / 1024,
sec_remaining / 60 / 60,
sec_remaining / 60 % 60,
sec_remaining % 60);
zcb->zcb_lastprint = now;
}
return (0);
}
static void
zdb_leak(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
(void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
(u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
}
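/*
 * zdb installs this ops vector (with a NULL allocator) on the pool's
 * metaslab classes in zdb_leak_init(), so nothing can allocate from
 * the pool while the ms_allocatable trees are repurposed for leak
 * detection.
 */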
static metaslab_ops_t zdb_metaslab_ops = {
NULL /* alloc */
};
/* ARGSUSED */
static int
load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
spa_vdev_removal_t *svr = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
/* skip vdevs we don't care about */
if (sme->sme_vdev != svr->svr_vdev_id)
return (0);
vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
range_tree_add(svr->svr_allocd_segs, offset, size);
else
range_tree_remove(svr->svr_allocd_segs, offset, size);
return (0);
}
/* ARGSUSED */
static void
claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
/*
* This callback was called through a remap from
* a device being removed. Therefore, the vdev that
* this callback is applied to is a concrete
* vdev.
*/
ASSERT(vdev_is_concrete(vd));
VERIFY0(metaslab_claim_impl(vd, offset, size,
spa_min_claim_txg(vd->vdev_spa)));
}
static void
claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
claim_segment_impl_cb, NULL);
}
/*
* After accounting for all allocated blocks that are directly referenced,
* we might have missed a reference to a block from a partially complete
* (and thus unused) indirect mapping object. We perform a secondary pass
* through the metaslabs we have already mapped and claim the destination
* blocks.
*/
static void
zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return;
if (spa->spa_vdev_removal == NULL)
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT0(range_tree_space(svr->svr_allocd_segs));
range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
ASSERT0(range_tree_space(allocs));
if (msp->ms_sm != NULL)
VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
}
range_tree_destroy(allocs);
iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
/*
* Clear everything past what has been synced,
* because we have not allocated mappings for
* it yet.
*/
range_tree_clear(svr->svr_allocd_segs,
vdev_indirect_mapping_max_offset(vim),
vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/* ARGSUSED */
static int
increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
zdb_cb_t *zcb = arg;
spa_t *spa = zcb->zcb_spa;
vdev_t *vd;
const dva_t *dva = &bp->blk_dva[0];
ASSERT(!bp_freed);
ASSERT(!dump_opt['L']);
ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
vdev_indirect_mapping_increment_obsolete_count(
vd->vdev_indirect_mapping,
DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
return (0);
}
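/*
 * Load the per-entry obsolete counts for an indirect vdev's mapping:
 * start from the counts object backing the mapping, fold in the
 * vdev's on-disk obsolete space map if one exists, and, if a condense
 * of this vdev is in progress, also fold in the previous obsolete
 * space map referenced by spa_condensing_indirect_phys.
 */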
static uint32_t *
zdb_load_obsolete_counts(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint64_t obsolete_sm_object;
uint32_t *counts;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
EQUIV(obsolete_sm_object != 0, vd->vdev_obsolete_sm != NULL);
counts = vdev_indirect_mapping_load_obsolete_counts(vim);
if (vd->vdev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
vd->vdev_obsolete_sm);
}
if (scip->scip_vdev == vd->vdev_id &&
scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
prev_obsolete_sm);
space_map_close(prev_obsolete_sm);
}
return (counts);
}
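/*
 * Walk the on-disk DDT and, for every entry with more than one
 * reference, accumulate the deduplicated space and block counts
 * (the DITTO phys variant is counted as ZDB_OT_DITTO), then add the
 * entry to the in-core DDT so the refcount decrements performed later
 * by zdb_count_block() find a matching entry. The walk stops when it
 * reaches the unique class.
 */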
static void
zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
ddt_bookmark_t ddb;
ddt_entry_t dde;
int error;
int p;
ASSERT(!dump_opt['L']);
bzero(&ddb, sizeof (ddb));
while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
blkptr_t blk;
ddt_phys_t *ddp = dde.dde_phys;
if (ddb.ddb_class == DDT_CLASS_UNIQUE)
return;
ASSERT(ddt_phys_total_refcnt(&dde) > 1);
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddb.ddb_checksum,
&dde.dde_key, ddp, &blk);
if (p == DDT_PHYS_DITTO) {
zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
} else {
zcb->zcb_dedup_asize +=
BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
zcb->zcb_dedup_blocks++;
}
}
ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
ddt_enter(ddt);
VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
ddt_exit(ddt);
}
ASSERT(error == ENOENT);
}
typedef struct checkpoint_sm_exclude_entry_arg {
vdev_t *cseea_vd;
uint64_t cseea_checkpoint_size;
} checkpoint_sm_exclude_entry_arg_t;
static int
checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
{
checkpoint_sm_exclude_entry_arg_t *cseea = arg;
vdev_t *vd = cseea->cseea_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
/*
* Since the vdev_checkpoint_sm exists in the vdev level
* and the ms_sm space maps exist in the metaslab level,
* an entry in the checkpoint space map could theoretically
* cross the boundaries of the metaslab that it belongs to.
*
* In reality, because of the way that we populate and
* manipulate the checkpoint's space maps currently,
* there shouldn't be any entries that cross metaslabs.
* Hence the assertion below.
*
* That said, there is no fundamental requirement that
* the checkpoint's space map entries should not cross
* metaslab boundaries. So if needed we could add code
* that handles metaslab-crossing segments in the future.
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* By removing the entry from the allocated segments we
* also verify that the entry is there to begin with.
*/
mutex_enter(&ms->ms_lock);
range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
cseea->cseea_checkpoint_size += sme->sme_run;
return (0);
}
static void
zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb)
{
spa_t *spa = vd->vdev_spa;
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
/*
* If there is no vdev_top_zap, we are in a pool whose
* version predates the pool checkpoint feature.
*/
if (vd->vdev_top_zap == 0)
return;
/*
* If there is no reference of the vdev_checkpoint_sm in
* the vdev_top_zap, then one of the following scenarios
* is true:
*
* 1] There is no checkpoint
* 2] There is a checkpoint, but no checkpointed blocks
* have been freed yet
* 3] The current vdev is indirect
*
* In these cases we return immediately.
*/
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
return;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1,
&checkpoint_sm_obj));
checkpoint_sm_exclude_entry_arg_t cseea;
cseea.cseea_vd = vd;
cseea.cseea_checkpoint_size = 0;
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
checkpoint_sm_exclude_entry_cb, &cseea));
space_map_close(checkpoint_sm);
zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size;
}
static void
zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id);
zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb);
}
}
static int
count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
int64_t *ualloc_space = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
*ualloc_space += sme->sme_run;
else
*ualloc_space -= sme->sme_run;
return (0);
}
static int64_t
get_unflushed_alloc_space(spa_t *spa)
{
if (dump_opt['L'])
return (0);
int64_t ualloc_space = 0;
iterate_through_spacemap_logs(spa, count_unflushed_space_cb,
&ualloc_space);
return (ualloc_space);
}
static int
load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
{
maptype_t *uic_maptype = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (*uic_maptype == sme->sme_type)
range_tree_add(ms->ms_allocatable, offset, size);
else
range_tree_remove(ms->ms_allocatable, offset, size);
return (0);
}
static void
load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype)
{
iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype);
}
static void
load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
ASSERT3U(i, ==, vd->vdev_id);
if (vd->vdev_ops == &vdev_indirect_ops)
continue;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
(void) fprintf(stderr,
"\rloading concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)msp->ms_id,
(longlong_t)vd->vdev_ms_count);
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
msp->ms_allocatable, maptype));
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
}
load_unflushed_to_ms_allocatables(spa, maptype);
}
/*
* vim_idxp is an in-out parameter which (for indirect vdevs) is the
* index in vim_entries that has the first entry in this metaslab.
* On return, it will be set to the first entry after this metaslab.
*/
static void
load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
uint64_t *vim_idxp)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
(*vim_idxp)++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[*vim_idxp];
uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
ASSERT3U(ent_offset, >=, msp->ms_start);
if (ent_offset >= msp->ms_start + msp->ms_size)
break;
/*
* Mappings do not cross metaslab boundaries,
* because we create them by walking the metaslabs.
*/
ASSERT3U(ent_offset + ent_len, <=,
msp->ms_start + msp->ms_size);
range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
static void
zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
ASSERT3U(c, ==, vd->vdev_id);
if (vd->vdev_ops != &vdev_indirect_ops)
continue;
/*
* Note: we don't check for mapping leaks on
* removing vdevs because their ms_allocatable trees
* are used to look for leaks in allocated space.
*/
zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd);
/*
* Normally, indirect vdevs don't have any
* metaslabs. We want to set them up for
* zio_claim().
*/
vdev_metaslab_group_create(vd);
VERIFY0(vdev_metaslab_init(vd, 0));
vdev_indirect_mapping_t *vim __maybe_unused =
vd->vdev_indirect_mapping;
uint64_t vim_idx = 0;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
(void) fprintf(stderr,
"\rloading indirect vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vd->vdev_ms[m]->ms_id,
(longlong_t)vd->vdev_ms_count);
load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m],
&vim_idx);
}
ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim));
}
}
static void
zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
zcb->zcb_spa = spa;
if (dump_opt['L'])
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We are going to be changing the meaning of the metaslab's
* ms_allocatable. Ensure that the allocator doesn't try to
* use the tree.
*/
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
spa->spa_embedded_log_class->mc_ops = &zdb_metaslab_ops;
zcb->zcb_vd_obsolete_counts =
umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
UMEM_NOFAIL);
/*
* For leak detection, we overload the ms_allocatable trees
* to contain allocated segments instead of free segments.
* As a result, we can't use the normal metaslab_load/unload
* interfaces.
*/
zdb_leak_init_prepare_indirect_vdevs(spa, zcb);
load_concrete_ms_allocatable_trees(spa, SM_ALLOC);
/*
* In load_concrete_ms_allocatable_trees() we loaded all the
* allocated entries from the ms_sm to the ms_allocatable for
* each metaslab. If the pool has a checkpoint or is in the
* middle of discarding a checkpoint, some of these blocks
* may have been freed but their ms_sm may not have been
* updated because they are referenced by the checkpoint. In
* order to avoid false-positives during leak-detection, we
* go through the vdev's checkpoint space map and exclude all
* its entries from their relevant ms_allocatable.
*
* We also aggregate the space held by the checkpoint and add
* it to zcb_checkpoint_size.
*
* Note that at this point we are also verifying that all the
* entries on the checkpoint_sm are marked as allocated in
* the ms_sm of their relevant metaslab.
* [see comment in checkpoint_sm_exclude_entry_cb()]
*/
zdb_leak_init_exclude_checkpoint(spa, zcb);
ASSERT3U(zcb->zcb_checkpoint_size, ==, spa_get_checkpoint_space(spa));
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
(void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
increment_indirect_mapping_cb, zcb, NULL);
}
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
zdb_ddt_leak_init(spa, zcb);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
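/*
 * Cross-check the obsolete counts recorded for an indirect vdev
 * against what the traversal left behind: for each mapping entry,
 * count the bytes that remain in ms_allocatable (i.e. were not
 * referenced during traversal) and compare them with the
 * corresponding zcb_vd_obsolete_counts entry. Mismatches are printed,
 * and are treated as leaks only when the obsolete counts are known to
 * be precise.
 */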
static boolean_t
zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
{
boolean_t leaks = B_FALSE;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t total_leaked = 0;
boolean_t are_precise = B_FALSE;
ASSERT(vim != NULL);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
uint64_t obsolete_bytes = 0;
uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
/*
* This is not very efficient but it's easy to
* verify correctness.
*/
for (uint64_t inner_offset = 0;
inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
inner_offset += 1 << vd->vdev_ashift) {
if (range_tree_contains(msp->ms_allocatable,
offset + inner_offset, 1 << vd->vdev_ashift)) {
obsolete_bytes += 1 << vd->vdev_ashift;
}
}
int64_t bytes_leaked = obsolete_bytes -
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (bytes_leaked != 0 && (are_precise || dump_opt['d'] >= 5)) {
(void) printf("obsolete indirect mapping count "
"mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(u_longlong_t)bytes_leaked);
}
total_leaked += ABS(bytes_leaked);
}
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (!are_precise && total_leaked > 0) {
int pct_leaked = total_leaked * 100 /
vdev_indirect_mapping_bytes_mapped(vim);
(void) printf("cannot verify obsolete indirect mapping "
"counts of vdev %llu because precise feature was not "
"enabled when it was removed: %d%% (%llx bytes) of mapping"
"unreferenced\n",
(u_longlong_t)vd->vdev_id, pct_leaked,
(u_longlong_t)total_leaked);
} else if (total_leaked > 0) {
(void) printf("obsolete indirect mapping count mismatch "
"for vdev %llu -- %llx total bytes mismatched\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)total_leaked);
leaks |= B_TRUE;
}
vdev_indirect_mapping_free_obsolete_counts(vim,
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
return (leaks);
}
static boolean_t
zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return (B_FALSE);
boolean_t leaks = B_FALSE;
vdev_t *rvd = spa->spa_root_vdev;
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
}
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(msp->ms_group, ==, (msp->ms_group->mg_class ==
spa_embedded_log_class(spa)) ?
vd->vdev_log_mg : vd->vdev_mg);
/*
* ms_allocatable has been overloaded
* to contain allocated segments. Now that
* we finished traversing all blocks, any
* block that remains in the ms_allocatable
* represents an allocated block that we
* did not claim during the traversal.
* Claimed blocks would have been removed
* from the ms_allocatable. For indirect
* vdevs, space remaining in the tree
* represents parts of the mapping that are
* not referenced, which is not a bug.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
range_tree_vacate(msp->ms_allocatable,
NULL, NULL);
} else {
range_tree_vacate(msp->ms_allocatable,
zdb_leak, vd);
}
if (msp->ms_loaded) {
msp->ms_loaded = B_FALSE;
}
}
}
umem_free(zcb->zcb_vd_obsolete_counts,
rvd->vdev_children * sizeof (uint32_t *));
zcb->zcb_vd_obsolete_counts = NULL;
return (leaks);
}
/* ARGSUSED */
static int
count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zdb_cb_t *zcb = arg;
if (dump_opt['b'] >= 5) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("[%s] %s\n",
"deferred free", blkbuf);
}
zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
return (0);
}
/*
* Iterate over livelists which have been destroyed by the user but
* are still present in the MOS, waiting to be freed
*/
static void
iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg)
{
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
ASSERT0(err);
zap_cursor_t zc;
zap_attribute_t attr;
dsl_deadlist_t ll;
/* NULL out os prior to dsl_deadlist_open in case it's garbage */
ll.dl_os = NULL;
for (zap_cursor_init(&zc, mos, zap_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
(void) zap_cursor_advance(&zc)) {
dsl_deadlist_open(&ll, mos, attr.za_first_integer);
func(&ll, arg);
dsl_deadlist_close(&ll);
}
zap_cursor_fini(&zc);
}
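/*
 * bpobj iteration callback that counts blocks via count_block_cb(). The
 * ASSERT enforces that only live (non-freed) entries are visited, as
 * guaranteed by bpobj_iterate_nofree().
 */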
static int
bpobj_count_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (count_block_cb(arg, bp, tx));
}
static int
livelist_entry_count_blocks_cb(void *args, dsl_deadlist_entry_t *dle)
{
zdb_cb_t *zbc = args;
bplist_t blks;
bplist_create(&blks);
/* determine which blocks have been alloc'd but not freed */
VERIFY0(dsl_process_sub_livelist(&dle->dle_bpobj, &blks, NULL, NULL));
/* count those blocks */
(void) bplist_iterate(&blks, count_block_cb, zbc, NULL);
bplist_destroy(&blks);
return (0);
}
static void
livelist_count_blocks(dsl_deadlist_t *ll, void *arg)
{
dsl_deadlist_iterate(ll, livelist_entry_count_blocks_cb, arg);
}
/*
* Count the blocks in the livelists that have been destroyed by the user
* but haven't yet been freed.
*/
static void
deleted_livelists_count_blocks(spa_t *spa, zdb_cb_t *zbc)
{
iterate_deleted_livelists(spa, livelist_count_blocks, zbc);
}
static void
dump_livelist_cb(dsl_deadlist_t *ll, void *arg)
{
ASSERT3P(arg, ==, NULL);
global_feature_count[SPA_FEATURE_LIVELIST]++;
dump_blkptr_list(ll, "Deleted Livelist");
dsl_deadlist_iterate(ll, sublivelist_verify_lightweight, NULL);
}
/*
* Print out, register object references to, and increment feature counts for
* livelists that have been destroyed by the user but haven't yet been freed.
*/
static void
deleted_livelists_dump_mos(spa_t *spa)
{
uint64_t zap_obj;
objset_t *mos = spa->spa_meta_objset;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
mos_obj_refd(zap_obj);
iterate_deleted_livelists(spa, dump_livelist_cb, NULL);
}
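/*
 * Traverse every block in the pool, tally per-type block statistics, and
 * (unless -L was given) cross-check the totals against the space maps to
 * detect leaked or double-allocated space. Returns 0 on success and
 * non-zero if leaks or traversal errors were found.
 */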
static int
dump_block_stats(spa_t *spa)
{
zdb_cb_t zcb;
zdb_blkstats_t *zb, *tzb;
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
int e, c, err;
bp_embedded_type_t i;
bzero(&zcb, sizeof (zcb));
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
(dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
(dump_opt['c'] == 1) ? "metadata " : "",
dump_opt['c'] ? "checksums " : "",
(dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
!dump_opt['L'] ? "nothing leaked " : "");
/*
* When leak detection is enabled we load all space maps as SM_ALLOC
* maps, then traverse the pool claiming each block we discover. If
* the pool is perfectly consistent, the segment trees will be empty
* when we're done. Anything left over is a leak; any block we can't
* claim (because it's not part of any space map) is a double
* allocation, a reference to a freed block, or an unclaimed log block.
*
* When leak detection is disabled (-L option) we still traverse the
* pool claiming each block we discover, but we skip opening any space
* maps.
*/
bzero(&zcb, sizeof (zdb_cb_t));
zdb_leak_init(spa, &zcb);
/*
* If there's a deferred-free bplist, process that first.
*/
(void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
bpobj_count_block_cb, &zcb, NULL);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
bpobj_count_block_cb, &zcb, NULL);
}
zdb_claim_removing(spa, &zcb);
if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
&zcb, NULL));
}
deleted_livelists_count_blocks(spa, &zcb);
if (dump_opt['c'] > 1)
flags |= TRAVERSE_PREFETCH_DATA;
zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
zcb.zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa));
zcb.zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
zcb.zcb_totalasize +=
metaslab_class_get_alloc(spa_embedded_log_class(spa));
zcb.zcb_start = zcb.zcb_lastprint = gethrtime();
err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
/*
* If we've traversed the data blocks then we need to wait for those
* I/Os to complete. We leverage "The Godfather" zio to wait on
* all async I/Os to complete.
*/
if (dump_opt['c']) {
for (c = 0; c < max_ncpus; c++) {
(void) zio_wait(spa->spa_async_zio_root[c]);
spa->spa_async_zio_root[c] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
}
ASSERT0(spa->spa_load_verify_bytes);
/*
* Done after zio_wait() since zcb_haderrors is modified in
* zdb_blkptr_done()
*/
zcb.zcb_haderrors |= err;
if (zcb.zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
for (e = 0; e < 256; e++) {
if (zcb.zcb_errors[e] != 0) {
(void) printf("\t%5d %llu\n",
e, (u_longlong_t)zcb.zcb_errors[e]);
}
}
}
/*
* Report any leaked segments.
*/
leaks |= zdb_leak_fini(spa, &zcb);
tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
norm_space = metaslab_class_get_space(spa_normal_class(spa));
total_alloc = norm_alloc +
metaslab_class_get_alloc(spa_log_class(spa)) +
metaslab_class_get_alloc(spa_embedded_log_class(spa)) +
metaslab_class_get_alloc(spa_special_class(spa)) +
metaslab_class_get_alloc(spa_dedup_class(spa)) +
get_unflushed_alloc_space(spa);
total_found = tzb->zb_asize - zcb.zcb_dedup_asize +
zcb.zcb_removing_size + zcb.zcb_checkpoint_size;
if (total_found == total_alloc && !dump_opt['L']) {
(void) printf("\n\tNo leaks (block sum matches space"
" maps exactly)\n");
} else if (!dump_opt['L']) {
(void) printf("block traversal size %llu != alloc %llu "
"(%s %lld)\n",
(u_longlong_t)total_found,
(u_longlong_t)total_alloc,
(dump_opt['L']) ? "unreachable" : "leaked",
(longlong_t)(total_alloc - total_found));
leaks = B_TRUE;
}
if (tzb->zb_count == 0)
return (2);
(void) printf("\n");
(void) printf("\t%-16s %14llu\n", "bp count:",
(u_longlong_t)tzb->zb_count);
(void) printf("\t%-16s %14llu\n", "ganged count:",
(longlong_t)tzb->zb_gangs);
(void) printf("\t%-16s %14llu avg: %6llu\n", "bp logical:",
(u_longlong_t)tzb->zb_lsize,
(u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp physical:", (u_longlong_t)tzb->zb_psize,
(u_longlong_t)(tzb->zb_psize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_psize);
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp allocated:", (u_longlong_t)tzb->zb_asize,
(u_longlong_t)(tzb->zb_asize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_asize);
(void) printf("\t%-16s %14llu ref>1: %6llu deduplication: %6.2f\n",
"bp deduped:", (u_longlong_t)zcb.zcb_dedup_asize,
(u_longlong_t)zcb.zcb_dedup_blocks,
(double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0);
(void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:",
(u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
if (spa_special_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_special_class(spa));
uint64_t space = metaslab_class_get_space(
spa_special_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Special class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_dedup_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_dedup_class(spa));
uint64_t space = metaslab_class_get_space(
spa_dedup_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Dedup class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_embedded_log_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_embedded_log_class(spa));
uint64_t space = metaslab_class_get_space(
spa_embedded_log_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Embedded log class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
for (i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
if (zcb.zcb_embedded_blocks[i] == 0)
continue;
(void) printf("\n");
(void) printf("\tadditional, non-pointer bps of type %u: "
"%10llu\n",
i, (u_longlong_t)zcb.zcb_embedded_blocks[i]);
if (dump_opt['b'] >= 3) {
(void) printf("\t number of (compressed) bytes: "
"number of bps\n");
dump_histogram(zcb.zcb_embedded_histogram[i],
sizeof (zcb.zcb_embedded_histogram[i]) /
sizeof (zcb.zcb_embedded_histogram[i][0]), 0);
}
}
if (tzb->zb_ditto_samevdev != 0) {
(void) printf("\tDittoed blocks on same vdev: %llu\n",
(longlong_t)tzb->zb_ditto_samevdev);
}
if (tzb->zb_ditto_same_ms != 0) {
(void) printf("\tDittoed blocks in same metaslab: %llu\n",
(longlong_t)tzb->zb_ditto_same_ms);
}
for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
if (vim == NULL) {
continue;
}
char mem[32];
zdb_nicenum(vdev_indirect_mapping_num_entries(vim),
mem, vdev_indirect_mapping_size(vim));
(void) printf("\tindirect vdev id %llu has %llu segments "
"(%s in memory)\n",
(longlong_t)vd->vdev_id,
(longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
}
if (dump_opt['b'] >= 2) {
int l, t, level;
(void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
"\t avg\t comp\t%%Total\tType\n");
for (t = 0; t <= ZDB_OT_TOTAL; t++) {
char csize[32], lsize[32], psize[32], asize[32];
char avg[32], gang[32];
const char *typename;
/* make sure nicenum has enough space */
CTASSERT(sizeof (csize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (psize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (avg) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (gang) >= NN_NUMBUF_SZ);
if (t < DMU_OT_NUMTYPES)
typename = dmu_ot[t].ot_name;
else
typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) {
(void) printf("%6s\t%5s\t%5s\t%5s"
"\t%5s\t%5s\t%6s\t%s\n",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
typename);
continue;
}
for (l = ZB_TOTAL - 1; l >= -1; l--) {
level = (l == -1 ? ZB_TOTAL : l);
zb = &zcb.zcb_type[level][t];
if (zb->zb_asize == 0)
continue;
if (dump_opt['b'] < 3 && level != ZB_TOTAL)
continue;
if (level == 0 && zb->zb_asize ==
zcb.zcb_type[ZB_TOTAL][t].zb_asize)
continue;
zdb_nicenum(zb->zb_count, csize,
sizeof (csize));
zdb_nicenum(zb->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(zb->zb_psize, psize,
sizeof (psize));
zdb_nicenum(zb->zb_asize, asize,
sizeof (asize));
zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
sizeof (avg));
zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)zb->zb_lsize / zb->zb_psize,
100.0 * zb->zb_asize / tzb->zb_asize);
if (level == ZB_TOTAL)
(void) printf("%s\n", typename);
else
(void) printf(" L%d %s\n",
level, typename);
if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
(void) printf("\t number of ganged "
"blocks: %s\n", gang);
}
if (dump_opt['b'] >= 4) {
(void) printf("psize "
"(in 512-byte sectors): "
"number of blocks\n");
dump_histogram(zb->zb_psize_histogram,
PSIZE_HISTO_SIZE, 0);
}
}
}
/* Output a table summarizing block sizes in the pool */
if (dump_opt['b'] >= 2) {
dump_size_histograms(&zcb);
}
}
(void) printf("\n");
if (leaks)
return (2);
if (zcb.zcb_haderrors)
return (3);
return (0);
}
typedef struct zdb_ddt_entry {
ddt_key_t zdde_key;
uint64_t zdde_ref_blocks;
uint64_t zdde_ref_lsize;
uint64_t zdde_ref_psize;
uint64_t zdde_ref_dsize;
avl_node_t zdde_node;
} zdb_ddt_entry_t;
/* ARGSUSED */
static int
zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
avl_tree_t *t = arg;
avl_index_t where;
zdb_ddt_entry_t *zdde, zdde_search;
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp))
return (0);
if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
(void) printf("traversing objset %llu, %llu objects, "
"%lu blocks so far\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)BP_GET_FILL(bp),
avl_numnodes(t));
}
if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
return (0);
ddt_key_fill(&zdde_search.zdde_key, bp);
zdde = avl_find(t, &zdde_search, &where);
if (zdde == NULL) {
zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
zdde->zdde_key = zdde_search.zdde_key;
avl_insert(t, zdde, where);
}
zdde->zdde_ref_blocks += 1;
zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
return (0);
}
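/*
 * Walk every data block in the pool and build an in-core table keyed by
 * block checksum to simulate what the DDT would look like if dedup were
 * enabled for everything, then print the resulting histogram and ratio.
 */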
static void
dump_simulated_ddt(spa_t *spa)
{
avl_tree_t t;
void *cookie = NULL;
zdb_ddt_entry_t *zdde;
ddt_histogram_t ddh_total;
ddt_stat_t dds_total;
bzero(&ddh_total, sizeof (ddh_total));
bzero(&dds_total, sizeof (dds_total));
avl_create(&t, ddt_entry_compare,
sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
(void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t);
spa_config_exit(spa, SCL_CONFIG, FTAG);
while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
ddt_stat_t dds;
uint64_t refcnt = zdde->zdde_ref_blocks;
ASSERT(refcnt != 0);
dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
dds.dds_psize = zdde->zdde_ref_psize / refcnt;
dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;
dds.dds_ref_blocks = zdde->zdde_ref_blocks;
dds.dds_ref_lsize = zdde->zdde_ref_lsize;
dds.dds_ref_psize = zdde->zdde_ref_psize;
dds.dds_ref_dsize = zdde->zdde_ref_dsize;
ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1],
&dds, 0);
umem_free(zdde, sizeof (*zdde));
}
avl_destroy(&t);
ddt_histogram_stat(&dds_total, &ddh_total);
(void) printf("Simulated DDT histogram:\n");
zpool_dump_ddt(&dds_total, &ddh_total);
dump_dedup_ratio(&dds_total);
}
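/*
 * Cross-check the device_removal and obsolete_counts feature refcounts
 * against the number of indirect vdevs and obsolete-count objects actually
 * present in the pool. Returns non-zero on a mismatch.
 */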
static int
verify_device_removal_feature_counts(spa_t *spa)
{
uint64_t dr_feature_refcount = 0;
uint64_t oc_feature_refcount = 0;
uint64_t indirect_vdev_count = 0;
uint64_t precise_vdev_count = 0;
uint64_t obsolete_counts_object_count = 0;
uint64_t obsolete_sm_count = 0;
uint64_t obsolete_counts_count = 0;
uint64_t scip_count = 0;
uint64_t obsolete_bpobj_count = 0;
int ret = 0;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
if (scip->scip_next_mapping_object != 0) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
(void) printf("Condensing indirect vdev %llu: new mapping "
"object %llu, prev obsolete sm %llu\n",
(u_longlong_t)scip->scip_vdev,
(u_longlong_t)scip->scip_next_mapping_object,
(u_longlong_t)scip->scip_prev_obsolete_sm_object);
if (scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm,
spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object,
0, vd->vdev_asize, 0));
dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
(void) printf("\n");
space_map_close(prev_obsolete_sm);
}
scip_count += 2;
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
if (vic->vic_mapping_object != 0) {
ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
vd->vdev_removing);
indirect_vdev_count++;
if (vd->vdev_indirect_mapping->vim_havecounts) {
obsolete_counts_count++;
}
}
boolean_t are_precise;
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (are_precise) {
ASSERT(vic->vic_mapping_object != 0);
precise_vdev_count++;
}
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vic->vic_mapping_object != 0);
obsolete_sm_count++;
}
}
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
&dr_feature_refcount);
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
&oc_feature_refcount);
if (dr_feature_refcount != indirect_vdev_count) {
ret = 1;
(void) printf("Number of indirect vdevs (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)indirect_vdev_count,
(u_longlong_t)dr_feature_refcount);
} else {
(void) printf("Verified device_removal feature refcount " \
"of %llu is correct\n",
(u_longlong_t)dr_feature_refcount);
}
if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ) == 0) {
obsolete_bpobj_count++;
}
obsolete_counts_object_count = precise_vdev_count;
obsolete_counts_object_count += obsolete_sm_count;
obsolete_counts_object_count += obsolete_counts_count;
obsolete_counts_object_count += scip_count;
obsolete_counts_object_count += obsolete_bpobj_count;
obsolete_counts_object_count += remap_deadlist_count;
if (oc_feature_refcount != obsolete_counts_object_count) {
ret = 1;
(void) printf("Number of obsolete counts objects (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)obsolete_counts_object_count,
(u_longlong_t)oc_feature_refcount);
(void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
"ob:%llu rd:%llu\n",
(u_longlong_t)precise_vdev_count,
(u_longlong_t)obsolete_sm_count,
(u_longlong_t)obsolete_counts_count,
(u_longlong_t)scip_count,
(u_longlong_t)obsolete_bpobj_count,
(u_longlong_t)remap_deadlist_count);
} else {
(void) printf("Verified indirect_refcount feature refcount " \
"of %llu is correct\n",
(u_longlong_t)oc_feature_refcount);
}
return (ret);
}
static void
zdb_set_skip_mmp(char *target)
{
spa_t *spa;
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
}
#define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE"
/*
* Import the checkpointed state of the pool specified by the target
* parameter as readonly. The function also accepts a pool config
* as an optional parameter; otherwise, it attempts to infer the config from
* the name of the target pool.
*
* Note that the checkpointed state's pool name will be the name of
* the original pool with the above suffix appended to it. In addition,
* if the target is not a pool name (e.g. a path to a dataset) then
* the new_path parameter is populated with the updated path to
* reflect the fact that we are looking into the checkpointed state.
*
* The function returns a newly-allocated copy of the name of the
* pool containing the checkpointed state. When this copy is no
* longer needed it should be freed with free(3C). Same thing
* applies to the new_path parameter if allocated.
*/
static char *
import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
{
int error = 0;
char *poolname, *bogus_name = NULL;
boolean_t freecfg = B_FALSE;
/* If the target is not a pool, then extract the pool name */
char *path_start = strchr(target, '/');
if (path_start != NULL) {
size_t poolname_len = path_start - target;
poolname = strndup(target, poolname_len);
} else {
poolname = target;
}
if (cfg == NULL) {
zdb_set_skip_mmp(poolname);
error = spa_get_stats(poolname, &cfg, NULL, 0);
if (error != 0) {
fatal("Tried to read config of pool \"%s\" but "
"spa_get_stats() failed with error %d\n",
poolname, error);
}
freecfg = B_TRUE;
}
if (asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX) == -1)
return (NULL);
fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name);
error = spa_import(bogus_name, cfg, NULL,
ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
ZFS_IMPORT_SKIP_MMP);
if (freecfg)
nvlist_free(cfg);
if (error != 0) {
fatal("Tried to import pool \"%s\" but spa_import() failed "
"with error %d\n", bogus_name, error);
}
if (new_path != NULL && path_start != NULL) {
if (asprintf(new_path, "%s%s", bogus_name, path_start) == -1) {
if (path_start != NULL)
free(poolname);
return (NULL);
}
}
if (target != poolname)
free(poolname);
return (bogus_name);
}
typedef struct verify_checkpoint_sm_entry_cb_arg {
vdev_t *vcsec_vd;
/* the following fields are only used for printing progress */
uint64_t vcsec_entryid;
uint64_t vcsec_num_entries;
} verify_checkpoint_sm_entry_cb_arg_t;
#define ENTRIES_PER_PROGRESS_UPDATE 10000
static int
verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
{
verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg;
vdev_t *vd = vcsec->vcsec_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) {
(void) fprintf(stderr,
"\rverifying vdev %llu, space map entry %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vcsec->vcsec_entryid,
(longlong_t)vcsec->vcsec_num_entries);
}
vcsec->vcsec_entryid++;
/*
* See comment in checkpoint_sm_exclude_entry_cb()
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* The entries in the vdev_checkpoint_sm should be marked as
* allocated in the checkpointed state of the pool, therefore
* their respective ms_allocatable trees should not contain them.
*/
mutex_enter(&ms->ms_lock);
range_tree_verify_not_present(ms->ms_allocatable,
sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
return (0);
}
/*
* Verify that all segments in the vdev_checkpoint_sm are allocated
* according to the checkpoint's ms_sm (i.e. are not in the checkpoint's
* ms_allocatable).
*
* Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of
* each vdev in the current state of the pool to the metaslab space maps
* (ms_sm) of the checkpointed state of the pool.
*
* Note that the function changes the state of the ms_allocatable
* trees of the current spa_t. The entries of these ms_allocatable
* trees are cleared out and then repopulated with the free
* entries of their respective ms_sm space maps.
*/
static void
verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_FREE);
for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c];
vdev_t *current_vd = current_rvd->vdev_child[c];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* Since we don't allow device removal in a pool
* that has a checkpoint, we expect that all removed
* vdevs were removed from the pool before the
* checkpoint.
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
/*
* If the checkpoint space map doesn't exist, then nothing
* here is checkpointed so there's nothing to verify.
*/
if (current_vd->vdev_top_zap == 0 ||
zap_contains(spa_meta_objset(current),
current_vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(current),
current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current),
checkpoint_sm_obj, 0, current_vd->vdev_asize,
current_vd->vdev_ashift));
verify_checkpoint_sm_entry_cb_arg_t vcsec;
vcsec.vcsec_vd = ckpoint_vd;
vcsec.vcsec_entryid = 0;
vcsec.vcsec_num_entries =
space_map_length(checkpoint_sm) / sizeof (uint64_t);
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
verify_checkpoint_sm_entry_cb, &vcsec));
if (dump_opt['m'] > 3)
dump_spacemap(current->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
/*
* If we've added vdevs since we took the checkpoint, ensure
* that their checkpoint space maps are empty.
*/
if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) {
for (uint64_t c = ckpoint_rvd->vdev_children;
c < current_rvd->vdev_children; c++) {
vdev_t *current_vd = current_rvd->vdev_child[c];
VERIFY3P(current_vd->vdev_checkpoint_sm, ==, NULL);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
/*
* Verifies that all space that's allocated in the checkpoint is
* still allocated in the current version, by checking that everything
* in checkpoint's ms_allocatable (which is actually allocated, not
* allocatable/free) is not present in current's ms_allocatable.
*
* Note that the function changes the state of the ms_allocatable
* trees of both spas when called. The entries of all ms_allocatable
* trees are cleared out and then repopulated from their respective
* ms_sm space maps. In the checkpointed state we load the allocated
* entries, and in the current state we load the free entries.
*/
static void
verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC);
load_concrete_ms_allocatable_trees(current, SM_FREE);
for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i];
vdev_t *current_vd = current_rvd->vdev_child[i];
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* See comment in verify_checkpoint_vdev_spacemaps()
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) {
metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m];
metaslab_t *current_msp = current_vd->vdev_ms[m];
(void) fprintf(stderr,
"\rverifying vdev %llu of %llu, "
"metaslab %llu of %llu ...",
(longlong_t)current_vd->vdev_id,
(longlong_t)current_rvd->vdev_children,
(longlong_t)current_vd->vdev_ms[m]->ms_id,
(longlong_t)current_vd->vdev_ms_count);
/*
* We walk through the ms_allocatable trees that
* are loaded with the allocated blocks from the
* ms_sm spacemaps of the checkpoint. For each
* one of these ranges we ensure that none of them
* exists in the ms_allocatable trees of the
* current state which are loaded with the ranges
* that are currently free.
*
* This way we ensure that none of the blocks that
* are part of the checkpoint were freed by mistake.
*/
range_tree_walk(ckpoint_msp->ms_allocatable,
(range_tree_func_t *)range_tree_verify_not_present,
current_msp->ms_allocatable);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
static void
verify_checkpoint_blocks(spa_t *spa)
{
ASSERT(!dump_opt['L']);
spa_t *checkpoint_spa;
char *checkpoint_pool;
int error = 0;
/*
* We import the checkpointed state of the pool (under a different
* name) so we can do verification on it against the current state
* of the pool.
*/
checkpoint_pool = import_checkpointed_state(spa->spa_name, NULL,
NULL);
ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0);
error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but spa_open() failed with "
"error %d\n", checkpoint_pool, error);
}
/*
* Ensure that ranges in the checkpoint space maps of each vdev
* are allocated according to the checkpointed state's metaslab
* space maps.
*/
verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa);
/*
* Ensure that allocated ranges in the checkpoint's metaslab
* space maps remain allocated in the metaslab space maps of
* the current state.
*/
verify_checkpoint_ms_spacemaps(checkpoint_spa, spa);
/*
* Once we are done, we get rid of the checkpointed state.
*/
spa_close(checkpoint_spa, FTAG);
free(checkpoint_pool);
}
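/*
 * Dump the checkpoint space map of every top-level vdev that has one.
 * Used to show what remains of a checkpoint that is in the middle of
 * being discarded.
 */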
static void
dump_leftover_checkpoint_blocks(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (vd->vdev_top_zap == 0)
continue;
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
dump_spacemap(spa->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
}
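/*
 * If the pool_checkpoint feature is active, look up and dump the
 * checkpointed uberblock from the MOS and, unless -L was given, verify
 * that the checkpointed state is still intact. Returns non-zero on error.
 */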
static int
verify_checkpoint(spa_t *spa)
{
uberblock_t checkpoint;
int error;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (0);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT && !dump_opt['L']) {
/*
* If the feature is active but the uberblock is missing
* then we must be in the middle of discarding the
* checkpoint.
*/
(void) printf("\nPartially discarded checkpoint "
"state found:\n");
if (dump_opt['m'] > 3)
dump_leftover_checkpoint_blocks(spa);
return (0);
} else if (error != 0) {
(void) printf("lookup error %d when looking for "
"checkpointed uberblock in MOS\n", error);
return (error);
}
dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n");
if (checkpoint.ub_checkpoint_txg == 0) {
(void) printf("\nub_checkpoint_txg not set in checkpointed "
"uberblock\n");
error = 3;
}
if (error == 0 && !dump_opt['L'])
verify_checkpoint_blocks(spa);
return (error);
}
/* ARGSUSED */
static void
mos_leaks_cb(void *arg, uint64_t start, uint64_t size)
{
for (uint64_t i = start; i < size; i++) {
(void) printf("MOS object %llu referenced but not allocated\n",
(u_longlong_t)i);
}
}
static void
mos_obj_refd(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL)
range_tree_add(mos_refd_objs, obj, 1);
}
/*
* Call on a MOS object that may already have been referenced.
*/
static void
mos_obj_refd_multiple(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL &&
!range_tree_contains(mos_refd_objs, obj, 1))
range_tree_add(mos_refd_objs, obj, 1);
}
static void
mos_leak_vdev_top_zap(vdev_t *vd)
{
uint64_t ms_flush_data_obj;
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(ms_flush_data_obj);
}
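/*
 * Recursively record every MOS object referenced by a vdev and its
 * children: DTL and metaslab-array objects, indirect mapping and births
 * objects, checkpoint and obsolete space maps, per-metaslab space maps,
 * and the leaf/top vdev ZAPs.
 */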
static void
mos_leak_vdev(vdev_t *vd)
{
mos_obj_refd(vd->vdev_dtl_object);
mos_obj_refd(vd->vdev_ms_array);
mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
mos_obj_refd(vd->vdev_leaf_zap);
if (vd->vdev_checkpoint_sm != NULL)
mos_obj_refd(vd->vdev_checkpoint_sm->sm_object);
if (vd->vdev_indirect_mapping != NULL) {
mos_obj_refd(vd->vdev_indirect_mapping->
vim_phys->vimp_counts_object);
}
if (vd->vdev_obsolete_sm != NULL)
mos_obj_refd(vd->vdev_obsolete_sm->sm_object);
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *ms = vd->vdev_ms[m];
mos_obj_refd(space_map_object(ms->ms_sm));
}
if (vd->vdev_top_zap != 0) {
mos_obj_refd(vd->vdev_top_zap);
mos_leak_vdev_top_zap(vd);
}
for (uint64_t c = 0; c < vd->vdev_children; c++) {
mos_leak_vdev(vd->vdev_child[c]);
}
}
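/*
 * Record the log space map ZAP and every log space map object it refers
 * to, if the log_spacemap feature is in use.
 */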
static void
mos_leak_log_spacemaps(spa_t *spa)
{
uint64_t spacemap_zap;
int error = zap_lookup(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP,
sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(spacemap_zap);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls))
mos_obj_refd(sls->sls_sm_obj);
}
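/*
 * Verify that every allocated object in the MOS is referenced from
 * somewhere: mark all known references, then walk the MOS and report any
 * object that was not marked (leaked) or was marked but never allocated.
 * Returns non-zero if a discrepancy was found.
 */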
static int
dump_mos_leaks(spa_t *spa)
{
int rv = 0;
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
/* Visit and mark all referenced objects in the MOS */
mos_obj_refd(DMU_POOL_DIRECTORY_OBJECT);
mos_obj_refd(spa->spa_pool_props_object);
mos_obj_refd(spa->spa_config_object);
mos_obj_refd(spa->spa_ddt_stat_object);
mos_obj_refd(spa->spa_feat_desc_obj);
mos_obj_refd(spa->spa_feat_enabled_txg_obj);
mos_obj_refd(spa->spa_feat_for_read_obj);
mos_obj_refd(spa->spa_feat_for_write_obj);
mos_obj_refd(spa->spa_history);
mos_obj_refd(spa->spa_errlog_last);
mos_obj_refd(spa->spa_errlog_scrub);
mos_obj_refd(spa->spa_all_vdev_zaps);
mos_obj_refd(spa->spa_dsl_pool->dp_bptree_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_tmp_userrefs_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_scan->scn_phys.scn_queue_obj);
bpobj_count_refd(&spa->spa_deferred_bpobj);
mos_obj_refd(dp->dp_empty_bpobj);
bpobj_count_refd(&dp->dp_obsolete_bpobj);
bpobj_count_refd(&dp->dp_free_bpobj);
mos_obj_refd(spa->spa_l2cache.sav_object);
mos_obj_refd(spa->spa_spares.sav_object);
if (spa->spa_syncing_log_sm != NULL)
mos_obj_refd(spa->spa_syncing_log_sm->sm_object);
mos_leak_log_spacemaps(spa);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_next_mapping_object);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_prev_obsolete_sm_object);
if (spa->spa_condensing_indirect_phys.scip_next_mapping_object != 0) {
vdev_indirect_mapping_t *vim =
vdev_indirect_mapping_open(mos,
spa->spa_condensing_indirect_phys.scip_next_mapping_object);
mos_obj_refd(vim->vim_phys->vimp_counts_object);
vdev_indirect_mapping_close(vim);
}
deleted_livelists_dump_mos(spa);
if (dp->dp_origin_snap != NULL) {
dsl_dataset_t *ds;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(dp->dp_origin_snap)->ds_next_snap_obj,
FTAG, &ds));
count_ds_mos_objects(ds);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
dsl_dataset_rele(ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
count_ds_mos_objects(dp->dp_origin_snap);
dump_blkptr_list(&dp->dp_origin_snap->ds_deadlist, "Deadlist");
}
count_dir_mos_objects(dp->dp_mos_dir);
if (dp->dp_free_dir != NULL)
count_dir_mos_objects(dp->dp_free_dir);
if (dp->dp_leak_dir != NULL)
count_dir_mos_objects(dp->dp_leak_dir);
mos_leak_vdev(spa->spa_root_vdev);
for (uint64_t class = 0; class < DDT_CLASSES; class++) {
for (uint64_t type = 0; type < DDT_TYPES; type++) {
for (uint64_t cksum = 0;
cksum < ZIO_CHECKSUM_FUNCTIONS; cksum++) {
ddt_t *ddt = spa->spa_ddt[cksum];
mos_obj_refd(ddt->ddt_object[type][class]);
}
}
}
/*
* Visit all allocated objects and make sure they are referenced.
*/
uint64_t object = 0;
while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
if (range_tree_contains(mos_refd_objs, object, 1)) {
range_tree_remove(mos_refd_objs, object, 1);
} else {
dmu_object_info_t doi;
const char *name;
dmu_object_info(mos, object, &doi);
if (doi.doi_type & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(doi.doi_type);
name = dmu_ot_byteswap[bswap].ob_name;
} else {
name = dmu_ot[doi.doi_type].ot_name;
}
(void) printf("MOS object %llu (%s) leaked\n",
(u_longlong_t)object, name);
rv = 2;
}
}
(void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
if (!range_tree_is_empty(mos_refd_objs))
rv = 2;
range_tree_vacate(mos_refd_objs, NULL, NULL);
range_tree_destroy(mos_refd_objs);
return (rv);
}
typedef struct log_sm_obsolete_stats_arg {
uint64_t lsos_current_txg;
uint64_t lsos_total_entries;
uint64_t lsos_valid_entries;
uint64_t lsos_sm_entries;
uint64_t lsos_valid_sm_entries;
} log_sm_obsolete_stats_arg_t;
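/*
 * Callback for iterate_through_spacemap_logs(): count how many entries in
 * each log space map are still relevant (not yet made obsolete by a later
 * metaslab flush), printing per-txg statistics as the iteration moves from
 * one log to the next.
 */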
static int
log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
log_sm_obsolete_stats_arg_t *lsos = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
if (lsos->lsos_current_txg == 0) {
/* this is the first log */
lsos->lsos_current_txg = txg;
} else if (lsos->lsos_current_txg < txg) {
/* we just changed log - print stats and reset */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos->lsos_valid_sm_entries,
(u_longlong_t)lsos->lsos_sm_entries,
(u_longlong_t)lsos->lsos_current_txg);
lsos->lsos_valid_sm_entries = 0;
lsos->lsos_sm_entries = 0;
lsos->lsos_current_txg = txg;
}
ASSERT3U(lsos->lsos_current_txg, ==, txg);
lsos->lsos_sm_entries++;
lsos->lsos_total_entries++;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
lsos->lsos_valid_sm_entries++;
lsos->lsos_valid_entries++;
return (0);
}
static void
dump_log_spacemap_obsolete_stats(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
log_sm_obsolete_stats_arg_t lsos;
bzero(&lsos, sizeof (lsos));
(void) printf("Log Space Map Obsolete Entry Statistics:\n");
iterate_through_spacemap_logs(spa,
log_spacemap_obsolete_stats_cb, &lsos);
/* print stats for latest log */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos.lsos_valid_sm_entries,
(u_longlong_t)lsos.lsos_sm_entries,
(u_longlong_t)lsos.lsos_current_txg);
(void) printf("%-8llu valid entries out of %-8llu - total\n\n",
(u_longlong_t)lsos.lsos_valid_entries,
(u_longlong_t)lsos.lsos_total_entries);
}
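/*
 * Top-level dump routine for an imported pool: dispatch to the various
 * dump and verification passes according to the options that were set,
 * and exit with a non-zero status if any verification failed.
 */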
static void
dump_zpool(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
int rc = 0;
if (dump_opt['y']) {
livelist_metaslab_validate(spa);
}
if (dump_opt['S']) {
dump_simulated_ddt(spa);
return;
}
if (!dump_opt['e'] && dump_opt['C'] > 1) {
(void) printf("\nCached configuration:\n");
dump_nvlist(spa->spa_config, 8);
}
if (dump_opt['C'])
dump_config(spa);
if (dump_opt['u'])
dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
if (dump_opt['D'])
dump_all_ddts(spa);
if (dump_opt['d'] > 2 || dump_opt['m'])
dump_metaslabs(spa);
if (dump_opt['M'])
dump_metaslab_groups(spa);
if (dump_opt['d'] > 2 || dump_opt['m']) {
dump_log_spacemaps(spa);
dump_log_spacemap_obsolete_stats(spa);
}
if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
dump_objset(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
dsl_pool_t *dp = spa->spa_dsl_pool;
dump_full_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
dump_full_bpobj(&dp->dp_free_bpobj,
"Pool snapshot frees", 0);
}
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
dump_full_bpobj(&dp->dp_obsolete_bpobj,
"Pool obsolete blocks", 0);
}
if (spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dump_bptree(spa->spa_meta_objset,
dp->dp_bptree_obj,
"Pool dataset frees");
}
dump_dtl(spa->spa_root_vdev, 0);
}
for (spa_feature_t f = 0; f < SPA_FEATURES; f++)
global_feature_count[f] = UINT64_MAX;
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS] = 0;
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN] = 0;
global_feature_count[SPA_FEATURE_LIVELIST] = 0;
(void) dmu_objset_find(spa_name(spa), dump_one_objset,
NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
if (rc == 0 && !dump_opt['L'])
rc = dump_mos_leaks(spa);
for (f = 0; f < SPA_FEATURES; f++) {
uint64_t refcount;
uint64_t *arr;
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
if (global_feature_count[f] == UINT64_MAX)
continue;
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(global_feature_count[f]);
continue;
}
arr = global_feature_count;
} else {
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(dataset_feature_count[f]);
continue;
}
arr = dataset_feature_count;
}
if (feature_get_refcount(spa, &spa_feature_table[f],
&refcount) == ENOTSUP)
continue;
if (arr[f] != refcount) {
(void) printf("%s feature refcount mismatch: "
"%lld consumers != %lld refcount\n",
spa_feature_table[f].fi_uname,
(longlong_t)arr[f], (longlong_t)refcount);
rc = 2;
} else {
(void) printf("Verified %s feature refcount "
"of %llu is correct\n",
spa_feature_table[f].fi_uname,
(longlong_t)refcount);
}
}
if (rc == 0)
rc = verify_device_removal_feature_counts(spa);
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
rc = dump_block_stats(spa);
if (rc == 0)
rc = verify_spacemap_refcounts(spa);
if (dump_opt['s'])
show_pool_stats(spa);
if (dump_opt['h'])
dump_history(spa);
if (rc == 0)
rc = verify_checkpoint(spa);
if (rc != 0) {
dump_debug_buffer();
exit(rc);
}
}
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
static int flagbits[256];
static char flagbitstr[16];
static void
zdb_print_blkptr(const blkptr_t *bp, int flags)
{
char blkbuf[BP_SPRINTF_LEN];
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static void
zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
{
int i;
for (i = 0; i < nbps; i++)
zdb_print_blkptr(&bp[i], flags);
}
static void
zdb_dump_gbh(void *buf, int flags)
{
zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
}
static void
zdb_dump_block_raw(void *buf, uint64_t size, int flags)
{
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array(buf, size);
VERIFY(write(fileno(stdout), buf, size) == size);
}
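/*
 * Print a block as a hex dump: the offset and two 64-bit words per line,
 * followed by the ASCII representation of those bytes, honoring the
 * byteswap flag.
 */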
static void
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
uint64_t *d = (uint64_t *)buf;
unsigned nwords = size / sizeof (uint64_t);
int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
unsigned i, j;
const char *hdr;
char *c;
if (do_bswap)
hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
else
hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
(void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
#ifdef _LITTLE_ENDIAN
/* correct the endianness */
do_bswap = !do_bswap;
#endif
for (i = 0; i < nwords; i += 2) {
(void) printf("%06llx: %016llx %016llx ",
(u_longlong_t)(i * sizeof (uint64_t)),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
c = (char *)&d[i];
for (j = 0; j < 2 * sizeof (uint64_t); j++)
(void) printf("%c", isprint(c[j]) ? c[j] : '.');
(void) printf("\n");
}
}
/*
* There are two acceptable formats:
* leaf_name - For example: c1t0d0 or /tmp/ztest.0a
* child[.child]* - For example: 0.1.1
*
* The second form can be used to specify arbitrary vdevs anywhere
* in the hierarchy. For example, in a pool with a mirror of
* RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
*/
static vdev_t *
zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
char *s, *p, *q;
unsigned i;
if (vdev == NULL)
return (NULL);
/* First, assume the x.x.x.x format */
i = strtoul(path, &s, 10);
if (s == path || (s && *s != '.' && *s != '\0'))
goto name;
if (i >= vdev->vdev_children)
return (NULL);
vdev = vdev->vdev_child[i];
if (s && *s == '\0')
return (vdev);
return (zdb_vdev_lookup(vdev, s+1));
name:
for (i = 0; i < vdev->vdev_children; i++) {
vdev_t *vc = vdev->vdev_child[i];
if (vc->vdev_path == NULL) {
vc = zdb_vdev_lookup(vc, path);
if (vc == NULL)
continue;
else
return (vc);
}
p = strrchr(vc->vdev_path, '/');
p = p ? p + 1 : vc->vdev_path;
q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
if (strcmp(vc->vdev_path, path) == 0)
return (vc);
if (strcmp(p, path) == 0)
return (vc);
if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
return (vc);
}
return (NULL);
}
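/*
 * Resolve an objset ID to its dataset name. Returns 0 on success after
 * writing the name into outstr (which must be able to hold
 * ZFS_MAX_DATASET_NAME_LEN bytes), or an error if the dataset cannot be
 * held.
 */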
static int
name_from_objset_id(spa_t *spa, uint64_t objset_id, char *outstr)
{
dsl_dataset_t *ds;
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
int error = dsl_dataset_hold_obj(spa->spa_dsl_pool, objset_id,
NULL, &ds);
if (error != 0) {
(void) fprintf(stderr, "failed to hold objset %llu: %s\n",
(u_longlong_t)objset_id, strerror(error));
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (error);
}
dsl_dataset_name(ds, outstr);
dsl_dataset_rele(ds, NULL);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (0);
}
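/*
 * Parse the "[lsize/]psize" portion of a block descriptor (hex values).
 * When only one size is given it is used for both. Returns B_TRUE if the
 * sizes are non-zero and lsize >= psize.
 */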
static boolean_t
zdb_parse_block_sizes(char *sizes, uint64_t *lsize, uint64_t *psize)
{
char *s0, *s1, *tmp = NULL;
if (sizes == NULL)
return (B_FALSE);
s0 = strtok_r(sizes, "/", &tmp);
if (s0 == NULL)
return (B_FALSE);
s1 = strtok_r(NULL, "/", &tmp);
*lsize = strtoull(s0, NULL, 16);
*psize = s1 ? strtoull(s1, NULL, 16) : *lsize;
return (*lsize >= *psize && *psize > 0);
}
#define ZIO_COMPRESS_MASK(alg) (1ULL << (ZIO_COMPRESS_##alg))
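/*
 * Brute-force the decompression of a block read with -R: try every
 * decompression algorithm at every candidate lsize until one decompresses
 * consistently. Returns B_TRUE if every attempt failed.
 */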
static boolean_t
zdb_decompress_block(abd_t *pabd, void *buf, void *lbuf, uint64_t lsize,
uint64_t psize, int flags)
{
boolean_t exceeded = B_FALSE;
/*
* We don't know how the data was compressed, so just try
* every decompress function at every inflated blocksize.
*/
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
int cfuncs[ZIO_COMPRESS_FUNCTIONS] = { 0 };
int *cfuncp = cfuncs;
uint64_t maxlsize = SPA_MAXBLOCKSIZE;
uint64_t mask = ZIO_COMPRESS_MASK(ON) | ZIO_COMPRESS_MASK(OFF) |
ZIO_COMPRESS_MASK(INHERIT) | ZIO_COMPRESS_MASK(EMPTY) |
(getenv("ZDB_NO_ZLE") ? ZIO_COMPRESS_MASK(ZLE) : 0);
*cfuncp++ = ZIO_COMPRESS_LZ4;
*cfuncp++ = ZIO_COMPRESS_LZJB;
mask |= ZIO_COMPRESS_MASK(LZ4) | ZIO_COMPRESS_MASK(LZJB);
for (int c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++)
if (((1ULL << c) & mask) == 0)
*cfuncp++ = c;
/*
* On the one hand, with SPA_MAXBLOCKSIZE at 16MB, this
* could take a while and we should let the user know
* we are not stuck. On the other hand, printing progress
* info gets old after a while. The user can specify the 'v'
* flag to see the progress.
*/
if (lsize == psize)
lsize += SPA_MINBLOCKSIZE;
else
maxlsize = lsize;
for (; lsize <= maxlsize; lsize += SPA_MINBLOCKSIZE) {
for (cfuncp = cfuncs; *cfuncp; cfuncp++) {
if (flags & ZDB_FLAG_VERBOSE) {
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize,
(u_longlong_t)lsize,
zio_compress_table[*cfuncp].\
ci_name);
}
/*
* We randomize lbuf2, and decompress to both
* lbuf and lbuf2. This way, we will know if
* decompression filled exactly lsize bytes.
*/
VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));
if (zio_decompress_data(*cfuncp, pabd,
lbuf, psize, lsize, NULL) == 0 &&
zio_decompress_data(*cfuncp, pabd,
lbuf2, psize, lsize, NULL) == 0 &&
bcmp(lbuf, lbuf2, lsize) == 0)
break;
}
if (*cfuncp != 0)
break;
}
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
if (lsize > maxlsize) {
exceeded = B_TRUE;
}
if (*cfuncp == ZIO_COMPRESS_ZLE) {
printf("\nZLE decompression was selected. If you "
"suspect the results are wrong,\ntry avoiding ZLE "
"by setting and exporting ZDB_NO_ZLE=\"true\"\n");
}
return (exceeded);
}
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
*
* pool:vdev_specifier:offset:[lsize/]psize[:flags]
*
* pool - The name of the pool you wish to read from
* vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
* offset - offset, in hex, in bytes
* size - Amount of data to read, in hex, in bytes
* flags - A string of characters specifying options
* b: Decode a blkptr at given offset within block
* c: Calculate and display checksums
* d: Decompress data before dumping
* e: Byteswap data before dumping
* g: Display data as a gang block header
* i: Display as an indirect block
* r: Dump raw data to stdout
* v: Verbose
*
*/
static void
zdb_read_block(char *thing, spa_t *spa)
{
blkptr_t blk, *bp = &blk;
dva_t *dva = bp->blk_dva;
int flags = 0;
uint64_t offset = 0, psize = 0, lsize = 0, blkptr_offset = 0;
zio_t *zio;
vdev_t *vd;
abd_t *pabd;
void *lbuf, *buf;
char *s, *p, *dup, *vdev, *flagstr, *sizes, *tmp = NULL;
int i, error;
boolean_t borrowed = B_FALSE, found = B_FALSE;
dup = strdup(thing);
s = strtok_r(dup, ":", &tmp);
vdev = s ? s : "";
s = strtok_r(NULL, ":", &tmp);
offset = strtoull(s ? s : "", NULL, 16);
sizes = strtok_r(NULL, ":", &tmp);
s = strtok_r(NULL, ":", &tmp);
flagstr = strdup(s ? s : "");
s = NULL;
tmp = NULL;
if (!zdb_parse_block_sizes(sizes, &lsize, &psize))
s = "invalid size(s)";
if (!IS_P2ALIGNED(psize, DEV_BSIZE) || !IS_P2ALIGNED(lsize, DEV_BSIZE))
s = "size must be a multiple of sector size";
if (!IS_P2ALIGNED(offset, DEV_BSIZE))
s = "offset must be a multiple of sector size";
if (s) {
(void) printf("Invalid block specifier: %s - %s\n", thing, s);
goto done;
}
for (s = strtok_r(flagstr, ":", &tmp);
s != NULL;
s = strtok_r(NULL, ":", &tmp)) {
for (i = 0; i < strlen(flagstr); i++) {
int bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
(void) printf("***Ignoring flag: %c\n",
(uchar_t)flagstr[i]);
continue;
}
found = B_TRUE;
flags |= bit;
p = &flagstr[i + 1];
if (*p != ':' && *p != '\0') {
int j = 0, nextbit = flagbits[(uchar_t)*p];
char *end, offstr[8] = { 0 };
if ((bit == ZDB_FLAG_PRINT_BLKPTR) &&
(nextbit == 0)) {
/* look ahead to isolate the offset */
while (nextbit == 0 &&
strchr(flagbitstr, *p) == NULL) {
offstr[j] = *p;
j++;
if (i + j > strlen(flagstr))
break;
p++;
nextbit = flagbits[(uchar_t)*p];
}
blkptr_offset = strtoull(offstr, &end,
16);
i += j;
} else if (nextbit == 0) {
(void) printf("***Ignoring flag arg:"
" '%c'\n", (uchar_t)*p);
}
}
}
}
if (blkptr_offset % sizeof (blkptr_t)) {
printf("Block pointer offset 0x%llx "
"must be divisible by 0x%x\n",
(longlong_t)blkptr_offset, (int)sizeof (blkptr_t));
goto done;
}
if (found == B_FALSE && strlen(flagstr) > 0) {
printf("Invalid flag arg: '%s'\n", flagstr);
goto done;
}
vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
if (vd == NULL) {
(void) printf("***Invalid vdev: %s\n", vdev);
free(dup);
return;
} else {
if (vd->vdev_path)
(void) fprintf(stderr, "Found vdev: %s\n",
vd->vdev_path);
else
(void) fprintf(stderr, "Found vdev type: %s\n",
vd->vdev_ops->vdev_op_type);
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
BP_ZERO(bp);
DVA_SET_VDEV(&dva[0], vd->vdev_id);
DVA_SET_OFFSET(&dva[0], offset);
DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(bp, lsize);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(bp, DMU_OT_NONE);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 0);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
zio = zio_root(spa, NULL, NULL, 0);
if (vd == vd->vdev_top) {
/*
* Treat this as a normal block read.
*/
zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
} else {
/*
* Treat this as a vdev child I/O.
*/
zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY | ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_OPTIONAL, NULL, NULL));
}
error = zio_wait(zio);
spa_config_exit(spa, SCL_STATE, FTAG);
if (error) {
(void) printf("Read of %s failed, error: %d\n", thing, error);
goto out;
}
uint64_t orig_lsize = lsize;
buf = lbuf;
if (flags & ZDB_FLAG_DECOMPRESS) {
boolean_t failed = zdb_decompress_block(pabd, buf, lbuf,
lsize, psize, flags);
if (failed) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
} else {
buf = abd_borrow_buf_copy(pabd, lsize);
borrowed = B_TRUE;
}
/*
* Try to detect an invalid block pointer. If the block pointer
* looks invalid, try decompressing the block before giving up.
*/
if ((flags & ZDB_FLAG_PRINT_BLKPTR || flags & ZDB_FLAG_INDIRECT) &&
!(flags & ZDB_FLAG_DECOMPRESS)) {
const blkptr_t *b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (zfs_blkptr_verify(spa, b, B_FALSE, BLK_VERIFY_ONLY) ==
B_FALSE) {
abd_return_buf_copy(pabd, buf, lsize);
borrowed = B_FALSE;
buf = lbuf;
boolean_t failed = zdb_decompress_block(pabd, buf,
lbuf, lsize, psize, flags);
b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (failed || zfs_blkptr_verify(spa, b, B_FALSE,
BLK_VERIFY_LOG) == B_FALSE) {
printf("invalid block pointer at this DVA\n");
goto out;
}
}
}
if (flags & ZDB_FLAG_PRINT_BLKPTR)
zdb_print_blkptr((blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
else if (flags & ZDB_FLAG_RAW)
zdb_dump_block_raw(buf, lsize, flags);
else if (flags & ZDB_FLAG_INDIRECT)
zdb_dump_indirect((blkptr_t *)buf,
orig_lsize / sizeof (blkptr_t), flags);
else if (flags & ZDB_FLAG_GBH)
zdb_dump_gbh(buf, flags);
else
zdb_dump_block(thing, buf, lsize, flags);
/*
* If :c was specified, iterate through the checksum table to
* calculate and display each checksum for our specified
* DVA and length.
*/
if ((flags & ZDB_FLAG_CHECKSUM) && !(flags & ZDB_FLAG_RAW) &&
!(flags & ZDB_FLAG_GBH)) {
zio_t *czio;
(void) printf("\n");
for (enum zio_checksum ck = ZIO_CHECKSUM_LABEL;
ck < ZIO_CHECKSUM_FUNCTIONS; ck++) {
if ((zio_checksum_table[ck].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED) ||
ck == ZIO_CHECKSUM_NOPARITY) {
continue;
}
BP_SET_CHECKSUM(bp, ck);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
czio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
czio->io_bp = bp;
if (vd == vd->vdev_top) {
zio_nowait(zio_read(czio, spa, bp, pabd, psize,
NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_DONT_RETRY, NULL));
} else {
zio_nowait(zio_vdev_child_io(czio, bp, vd,
offset, pabd, psize, ZIO_TYPE_READ,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_OPTIONAL, NULL, NULL));
}
error = zio_wait(czio);
if (error == 0 || error == ECKSUM) {
zio_t *ck_zio = zio_root(spa, NULL, NULL, 0);
ck_zio->io_offset =
DVA_GET_OFFSET(&bp->blk_dva[0]);
ck_zio->io_bp = bp;
zio_checksum_compute(ck_zio, ck, pabd, lsize);
printf("%12s\tcksum=%llx:%llx:%llx:%llx\n",
zio_checksum_table[ck].ci_name,
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
zio_wait(ck_zio);
} else {
printf("error %d reading block\n", error);
}
spa_config_exit(spa, SCL_STATE, FTAG);
}
}
if (borrowed)
abd_return_buf_copy(pabd, buf, lsize);
out:
abd_free(pabd);
umem_free(lbuf, SPA_MAXBLOCKSIZE);
done:
free(flagstr);
free(dup);
}
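/*
 * Decode an embedded block pointer given on the command line as sixteen
 * colon-separated hex words and dump its payload to stdout.
 */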
static void
zdb_embedded_block(char *thing)
{
blkptr_t bp;
unsigned long long *words = (void *)&bp;
char *buf;
int err;
bzero(&bp, sizeof (bp));
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
words + 4, words + 5, words + 6, words + 7,
words + 8, words + 9, words + 10, words + 11,
words + 12, words + 13, words + 14, words + 15);
if (err != 16) {
(void) fprintf(stderr, "invalid input format\n");
exit(1);
}
ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
if (err != 0) {
(void) fprintf(stderr, "decode failed: %u\n", err);
exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
free(buf);
}
int
main(int argc, char **argv)
{
int c;
struct rlimit rl = { 1024, 1024 };
spa_t *spa = NULL;
objset_t *os = NULL;
int dump_all = 1;
int verbose = 0;
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
char *target, *target_pool, dsname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int64_t objset_id = -1;
uint64_t object;
int flags = ZFS_IMPORT_MISSING_LOG;
int rewind = ZPOOL_NEVER_REWIND;
char *spa_config_path_env, *objset_str;
boolean_t target_is_spa = B_TRUE, dataset_lookup = B_FALSE;
nvlist_t *cfg = NULL;
(void) setrlimit(RLIMIT_NOFILE, &rl);
(void) enable_extended_FILE_stdio(-1, -1);
dprintf_setup(&argc, argv);
/*
* If the SPA_CONFIG_PATH environment variable is set, it overrides the
* default spa_config_path setting. If the -U flag is specified, it in
* turn overrides the environment variable.
*/
spa_config_path_env = getenv("SPA_CONFIG_PATH");
if (spa_config_path_env != NULL)
spa_config_path = spa_config_path_env;
/*
* For performance reasons, we set this tunable down. We do so before
* the arg parsing section so that the user can override this value if
* they choose.
*/
zfs_btree_verify_intensity = 3;
while ((c = getopt(argc, argv,
"AbcCdDeEFGhiI:klLmMo:Op:PqrRsSt:uU:vVx:XYyZ")) != -1) {
switch (c) {
case 'b':
case 'c':
case 'C':
case 'd':
case 'D':
case 'E':
case 'G':
case 'h':
case 'i':
case 'l':
case 'm':
case 'M':
case 'O':
case 'r':
case 'R':
case 's':
case 'S':
case 'u':
case 'y':
case 'Z':
dump_opt[c]++;
dump_all = 0;
break;
case 'A':
case 'e':
case 'F':
case 'k':
case 'L':
case 'P':
case 'q':
case 'X':
dump_opt[c]++;
break;
case 'Y':
zfs_reconstruct_indirect_combinations_max = INT_MAX;
zfs_deadman_enabled = 0;
break;
/* NB: Sort single match options below. */
case 'I':
max_inflight_bytes = strtoull(optarg, NULL, 0);
if (max_inflight_bytes == 0) {
(void) fprintf(stderr, "maximum number "
"of inflight bytes must be greater "
"than 0\n");
usage();
}
break;
case 'o':
error = set_global_var(optarg);
if (error != 0)
usage();
break;
case 'p':
if (searchdirs == NULL) {
searchdirs = umem_alloc(sizeof (char *),
UMEM_NOFAIL);
} else {
char **tmp = umem_alloc((nsearch + 1) *
sizeof (char *), UMEM_NOFAIL);
bcopy(searchdirs, tmp, nsearch *
sizeof (char *));
umem_free(searchdirs,
nsearch * sizeof (char *));
searchdirs = tmp;
}
searchdirs[nsearch++] = optarg;
break;
case 't':
max_txg = strtoull(optarg, NULL, 0);
if (max_txg < TXG_INITIAL) {
(void) fprintf(stderr, "incorrect txg "
"specified: %s\n", optarg);
usage();
}
break;
case 'U':
spa_config_path = optarg;
if (spa_config_path[0] != '/') {
(void) fprintf(stderr,
"cachefile must be an absolute path "
"(i.e. start with a slash)\n");
usage();
}
break;
case 'v':
verbose++;
break;
case 'V':
flags = ZFS_IMPORT_VERBATIM;
break;
case 'x':
vn_dumpdir = optarg;
break;
default:
usage();
break;
}
}
if (!dump_opt['e'] && searchdirs != NULL) {
(void) fprintf(stderr, "-p option requires use of -e\n");
usage();
}
if (dump_opt['d'] || dump_opt['r']) {
/* <pool>[/<dataset | objset id>] is accepted */
if (argv[2] && (objset_str = strchr(argv[2], '/')) != NULL &&
objset_str++ != NULL) {
char *endptr;
errno = 0;
objset_id = strtoull(objset_str, &endptr, 0);
/* dataset 0 is the same as opening the pool */
if (errno == 0 && endptr != objset_str &&
objset_id != 0) {
target_is_spa = B_FALSE;
dataset_lookup = B_TRUE;
} else if (objset_id != 0) {
printf("failed to open objset %s "
"%llu %s", objset_str,
(u_longlong_t)objset_id,
strerror(errno));
exit(1);
}
/* normal dataset name, not an objset ID */
if (endptr == objset_str) {
objset_id = -1;
}
}
}
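/*
 * Illustrative forms accepted here (hypothetical names): "tank/fs" is a
 * dataset name, while "tank/54" is interpreted as an objset ID because
 * the text after '/' parses entirely as a number. Per the comment above,
 * an ID of 0 is treated like opening the pool itself.
 */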
#if defined(_LP64)
/*
* ZDB does not typically re-read blocks; therefore limit the ARC
* to 256 MB, which can be used entirely for metadata.
*/
zfs_arc_min = zfs_arc_meta_min = 2ULL << SPA_MAXBLOCKSHIFT;
zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024;
#endif
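/*
 * A rough sketch of the arithmetic above, assuming SPA_MAXBLOCKSHIFT is
 * 24 (16 MB maximum block size): the minimum is 2 << 24 = 32 MB and the
 * maximum is 256 * 1024 * 1024 = 256 MB, matching the comment.
 */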
/*
* "zdb -c" uses checksum-verifying scrub i/os which are async reads.
* "zdb -b" uses traversal prefetch which uses async reads.
* For good performance, let several of them be active at once.
*/
zfs_vdev_async_read_max_active = 10;
/*
* Disable reference tracking for better performance.
*/
reference_tracking_enable = B_FALSE;
/*
* Do not fail spa_load when spa_load_verify fails. This is needed
* to load non-idle pools.
*/
spa_load_verify_dryrun = B_TRUE;
kernel_init(SPA_MODE_READ);
if (dump_all)
verbose = MAX(verbose, 1);
for (c = 0; c < 256; c++) {
if (dump_all && strchr("AeEFklLOPrRSXy", c) == NULL)
dump_opt[c] = 1;
if (dump_opt[c])
dump_opt[c] += verbose;
}
libspl_assert_ok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2);
zfs_recover = (dump_opt['A'] > 1);
argc -= optind;
argv += optind;
if (argc < 2 && dump_opt['R'])
usage();
if (dump_opt['E']) {
if (argc != 1)
usage();
zdb_embedded_block(argv[0]);
return (0);
}
if (argc < 1) {
if (!dump_opt['e'] && dump_opt['C']) {
dump_cachefile(spa_config_path);
return (0);
}
usage();
}
if (dump_opt['l'])
return (dump_label(argv[0]));
if (dump_opt['O']) {
if (argc != 2)
usage();
dump_opt['v'] = verbose + 3;
return (dump_path(argv[0], argv[1], NULL));
}
if (dump_opt['r']) {
if (argc != 3)
usage();
dump_opt['v'] = verbose;
error = dump_path(argv[0], argv[1], &object);
}
if (dump_opt['X'] || dump_opt['F'])
rewind = ZPOOL_DO_REWIND |
(dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
fatal("internal error: %s", strerror(ENOMEM));
error = 0;
target = argv[0];
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
target_pool = strdup(target);
*strpbrk(target_pool, "/@") = '\0';
target_is_spa = B_FALSE;
targetlen = strlen(target);
if (targetlen && target[targetlen - 1] == '/')
target[targetlen - 1] = '\0';
} else {
target_pool = target;
}
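/*
 * For example (hypothetical names), a target of "tank/fs@snap" yields a
 * target_pool of "tank" for the import/lookup step, while the full target
 * string is still used to open the dataset later; a trailing '/' as in
 * "tank/fs/" is stripped.
 */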
if (dump_opt['e']) {
importargs_t args = { 0 };
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_TRUE;
error = zpool_find_config(NULL, target_pool, &cfg, &args,
&libzpool_config_ops);
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_LOAD_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
/*
* Disable the activity check to allow examination of
* active pools.
*/
error = spa_import(target_pool, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
if (searchdirs != NULL) {
umem_free(searchdirs, nsearch * sizeof (char *));
searchdirs = NULL;
}
/*
* import_checkpointed_state assumes that the target pool we
* pass it is already part of the spa namespace. Because of
* that we need to make sure to always call it after the -e
* option has been processed, which imports the pool into the
* namespace if it's not in the cachefile.
*/
char *checkpoint_pool = NULL;
char *checkpoint_target = NULL;
if (dump_opt['k']) {
checkpoint_pool = import_checkpointed_state(target, cfg,
&checkpoint_target);
if (checkpoint_target != NULL)
target = checkpoint_target;
}
if (cfg != NULL) {
nvlist_free(cfg);
cfg = NULL;
}
if (target_pool != target)
free(target_pool);
if (error == 0) {
if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
ASSERT(checkpoint_pool != NULL);
ASSERT(checkpoint_target == NULL);
error = spa_open(checkpoint_pool, &spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but "
"spa_open() failed with error %d\n",
checkpoint_pool, error);
}
} else if (target_is_spa || dump_opt['R'] || objset_id == 0) {
zdb_set_skip_mmp(target);
error = spa_open_rewind(target, &spa, FTAG, policy,
NULL);
if (error) {
/*
* If we're missing the log device then
* try opening the pool after clearing the
* log state.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL &&
spa->spa_log_state == SPA_LOG_MISSING) {
spa->spa_log_state = SPA_LOG_CLEAR;
error = 0;
}
mutex_exit(&spa_namespace_lock);
if (!error) {
error = spa_open_rewind(target, &spa,
FTAG, policy, NULL);
}
}
} else if (strpbrk(target, "#") != NULL) {
dsl_pool_t *dp;
error = dsl_pool_hold(target, FTAG, &dp);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
error = dump_bookmark(dp, target, B_TRUE, verbose > 1);
dsl_pool_rele(dp, FTAG);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
return (error);
} else {
zdb_set_skip_mmp(target);
if (dataset_lookup == B_TRUE) {
/*
* Use the supplied id to get the name
* for open_objset.
*/
error = spa_open(target, &spa, FTAG);
if (error == 0) {
error = name_from_objset_id(spa,
objset_id, dsname);
spa_close(spa, FTAG);
if (error == 0)
target = dsname;
}
}
if (error == 0)
error = open_objset(target, FTAG, &os);
if (error == 0)
spa = dmu_objset_spa(os);
}
}
nvlist_free(policy);
if (error)
fatal("can't open '%s': %s", target, strerror(error));
/*
* Set the pool failure mode to panic in order to prevent the pool
* from suspending. A suspended I/O will have no way to resume and
* can prevent the zdb(8) command from terminating as expected.
*/
if (spa != NULL)
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
argv++;
argc--;
if (dump_opt['r']) {
error = zdb_copy_object(os, object, argv[1]);
} else if (!dump_opt['R']) {
flagbits['d'] = ZOR_FLAG_DIRECTORY;
flagbits['f'] = ZOR_FLAG_PLAIN_FILE;
flagbits['m'] = ZOR_FLAG_SPACE_MAP;
flagbits['z'] = ZOR_FLAG_ZAP;
flagbits['A'] = ZOR_FLAG_ALL_TYPES;
if (argc > 0 && dump_opt['d']) {
zopt_object_args = argc;
zopt_object_ranges = calloc(zopt_object_args,
sizeof (zopt_object_range_t));
for (unsigned i = 0; i < zopt_object_args; i++) {
int err;
char *msg = NULL;
err = parse_object_range(argv[i],
&zopt_object_ranges[i], &msg);
if (err != 0)
fatal("Bad object or range: '%s': %s\n",
argv[i], msg ? msg : "");
}
} else if (argc > 0 && dump_opt['m']) {
zopt_metaslab_args = argc;
zopt_metaslab = calloc(zopt_metaslab_args,
sizeof (uint64_t));
for (unsigned i = 0; i < zopt_metaslab_args; i++) {
errno = 0;
zopt_metaslab[i] = strtoull(argv[i], NULL, 0);
if (zopt_metaslab[i] == 0 && errno != 0)
fatal("bad number %s: %s", argv[i],
strerror(errno));
}
}
if (os != NULL) {
dump_objset(os);
} else if (zopt_object_args > 0 && !dump_opt['m']) {
dump_objset(spa->spa_meta_objset);
} else {
dump_zpool(spa);
}
} else {
flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
flagbits['c'] = ZDB_FLAG_CHECKSUM;
flagbits['d'] = ZDB_FLAG_DECOMPRESS;
flagbits['e'] = ZDB_FLAG_BSWAP;
flagbits['g'] = ZDB_FLAG_GBH;
flagbits['i'] = ZDB_FLAG_INDIRECT;
flagbits['r'] = ZDB_FLAG_RAW;
flagbits['v'] = ZDB_FLAG_VERBOSE;
for (int i = 0; i < argc; i++)
zdb_read_block(argv[i], spa);
}
if (dump_opt['k']) {
free(checkpoint_pool);
if (!target_is_spa)
free(checkpoint_target);
}
if (os != NULL) {
close_objset(os, FTAG);
} else {
spa_close(spa, FTAG);
}
fuid_table_destroy();
dump_debug_buffer();
kernel_fini();
return (error);
}
diff --git a/sys/contrib/openzfs/cmd/zfs/zfs_main.c b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
index a8d68cd03cf8..1a49d44f6086 100644
--- a/sys/contrib/openzfs/cmd/zfs/zfs_main.c
+++ b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
@@ -1,8794 +1,8792 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2012 Milan Jurik. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright 2019 Joyent, Inc.
* Copyright (c) 2019, 2020 by Christian Schwarz. All rights reserved.
*/
#include <assert.h>
#include <ctype.h>
#include <sys/debug.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <libnvpair.h>
#include <locale.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <fcntl.h>
#include <zone.h>
#include <grp.h>
#include <pwd.h>
#include <umem.h>
#include <pthread.h>
#include <signal.h>
#include <sys/list.h>
#include <sys/mkdev.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/fs/zfs.h>
#include <sys/systeminfo.h>
#include <sys/types.h>
#include <time.h>
#include <sys/zfs_project.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include <zfs_prop.h>
#include <zfs_deleg.h>
#include <libzutil.h>
#ifdef HAVE_IDMAP
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */
#include "zfs_iter.h"
#include "zfs_util.h"
#include "zfs_comutil.h"
#include "zfs_projectutil.h"
libzfs_handle_t *g_zfs;
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static int zfs_do_clone(int argc, char **argv);
static int zfs_do_create(int argc, char **argv);
static int zfs_do_destroy(int argc, char **argv);
static int zfs_do_get(int argc, char **argv);
static int zfs_do_inherit(int argc, char **argv);
static int zfs_do_list(int argc, char **argv);
static int zfs_do_mount(int argc, char **argv);
static int zfs_do_rename(int argc, char **argv);
static int zfs_do_rollback(int argc, char **argv);
static int zfs_do_set(int argc, char **argv);
static int zfs_do_upgrade(int argc, char **argv);
static int zfs_do_snapshot(int argc, char **argv);
static int zfs_do_unmount(int argc, char **argv);
static int zfs_do_share(int argc, char **argv);
static int zfs_do_unshare(int argc, char **argv);
static int zfs_do_send(int argc, char **argv);
static int zfs_do_receive(int argc, char **argv);
static int zfs_do_promote(int argc, char **argv);
static int zfs_do_userspace(int argc, char **argv);
static int zfs_do_allow(int argc, char **argv);
static int zfs_do_unallow(int argc, char **argv);
static int zfs_do_hold(int argc, char **argv);
static int zfs_do_holds(int argc, char **argv);
static int zfs_do_release(int argc, char **argv);
static int zfs_do_diff(int argc, char **argv);
static int zfs_do_bookmark(int argc, char **argv);
static int zfs_do_channel_program(int argc, char **argv);
static int zfs_do_load_key(int argc, char **argv);
static int zfs_do_unload_key(int argc, char **argv);
static int zfs_do_change_key(int argc, char **argv);
static int zfs_do_project(int argc, char **argv);
static int zfs_do_version(int argc, char **argv);
static int zfs_do_redact(int argc, char **argv);
static int zfs_do_wait(int argc, char **argv);
#ifdef __FreeBSD__
static int zfs_do_jail(int argc, char **argv);
static int zfs_do_unjail(int argc, char **argv);
#endif
/*
* Enable a reasonable set of defaults for libumem debugging on DEBUG builds.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_CLONE,
HELP_CREATE,
HELP_DESTROY,
HELP_GET,
HELP_INHERIT,
HELP_UPGRADE,
HELP_LIST,
HELP_MOUNT,
HELP_PROMOTE,
HELP_RECEIVE,
HELP_RENAME,
HELP_ROLLBACK,
HELP_SEND,
HELP_SET,
HELP_SHARE,
HELP_SNAPSHOT,
HELP_UNMOUNT,
HELP_UNSHARE,
HELP_ALLOW,
HELP_UNALLOW,
HELP_USERSPACE,
HELP_GROUPSPACE,
HELP_PROJECTSPACE,
HELP_PROJECT,
HELP_HOLD,
HELP_HOLDS,
HELP_RELEASE,
HELP_DIFF,
HELP_BOOKMARK,
HELP_CHANNEL_PROGRAM,
HELP_LOAD_KEY,
HELP_UNLOAD_KEY,
HELP_CHANGE_KEY,
HELP_VERSION,
HELP_REDACT,
HELP_JAIL,
HELP_UNJAIL,
HELP_WAIT,
} zfs_help_t;
typedef struct zfs_command {
const char *name;
int (*func)(int argc, char **argv);
zfs_help_t usage;
} zfs_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zfs_command_t command_table[] = {
{ "version", zfs_do_version, HELP_VERSION },
{ NULL },
{ "create", zfs_do_create, HELP_CREATE },
{ "destroy", zfs_do_destroy, HELP_DESTROY },
{ NULL },
{ "snapshot", zfs_do_snapshot, HELP_SNAPSHOT },
{ "rollback", zfs_do_rollback, HELP_ROLLBACK },
{ "clone", zfs_do_clone, HELP_CLONE },
{ "promote", zfs_do_promote, HELP_PROMOTE },
{ "rename", zfs_do_rename, HELP_RENAME },
{ "bookmark", zfs_do_bookmark, HELP_BOOKMARK },
{ "program", zfs_do_channel_program, HELP_CHANNEL_PROGRAM },
{ NULL },
{ "list", zfs_do_list, HELP_LIST },
{ NULL },
{ "set", zfs_do_set, HELP_SET },
{ "get", zfs_do_get, HELP_GET },
{ "inherit", zfs_do_inherit, HELP_INHERIT },
{ "upgrade", zfs_do_upgrade, HELP_UPGRADE },
{ NULL },
{ "userspace", zfs_do_userspace, HELP_USERSPACE },
{ "groupspace", zfs_do_userspace, HELP_GROUPSPACE },
{ "projectspace", zfs_do_userspace, HELP_PROJECTSPACE },
{ NULL },
{ "project", zfs_do_project, HELP_PROJECT },
{ NULL },
{ "mount", zfs_do_mount, HELP_MOUNT },
{ "unmount", zfs_do_unmount, HELP_UNMOUNT },
{ "share", zfs_do_share, HELP_SHARE },
{ "unshare", zfs_do_unshare, HELP_UNSHARE },
{ NULL },
{ "send", zfs_do_send, HELP_SEND },
{ "receive", zfs_do_receive, HELP_RECEIVE },
{ NULL },
{ "allow", zfs_do_allow, HELP_ALLOW },
{ NULL },
{ "unallow", zfs_do_unallow, HELP_UNALLOW },
{ NULL },
{ "hold", zfs_do_hold, HELP_HOLD },
{ "holds", zfs_do_holds, HELP_HOLDS },
{ "release", zfs_do_release, HELP_RELEASE },
{ "diff", zfs_do_diff, HELP_DIFF },
{ "load-key", zfs_do_load_key, HELP_LOAD_KEY },
{ "unload-key", zfs_do_unload_key, HELP_UNLOAD_KEY },
{ "change-key", zfs_do_change_key, HELP_CHANGE_KEY },
{ "redact", zfs_do_redact, HELP_REDACT },
{ "wait", zfs_do_wait, HELP_WAIT },
#ifdef __FreeBSD__
{ "jail", zfs_do_jail, HELP_JAIL },
{ "unjail", zfs_do_unjail, HELP_UNJAIL },
#endif
};
#define NCOMMAND (sizeof (command_table) / sizeof (command_table[0]))
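/*
 * A minimal sketch of how a new subcommand slots into this table
 * (hypothetical "foo" command, not part of this change): declare a
 * zfs_do_foo(int, char **) handler, add a HELP_FOO value to zfs_help_t,
 * return its usage string from get_usage(), and append
 * { "foo", zfs_do_foo, HELP_FOO } to command_table; a { NULL } entry
 * prints as a blank line in the generic usage listing.
 */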
zfs_command_t *current_command;
static const char *
get_usage(zfs_help_t idx)
{
switch (idx) {
case HELP_CLONE:
return (gettext("\tclone [-p] [-o property=value] ... "
"<snapshot> <filesystem|volume>\n"));
case HELP_CREATE:
return (gettext("\tcreate [-Pnpuv] [-o property=value] ... "
"<filesystem>\n"
"\tcreate [-Pnpsv] [-b blocksize] [-o property=value] ... "
"-V <size> <volume>\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-fnpRrv] <filesystem|volume>\n"
"\tdestroy [-dnpRrv] "
"<filesystem|volume>@<snap>[%<snap>][,...]\n"
"\tdestroy <filesystem|volume>#<bookmark>\n"));
case HELP_GET:
return (gettext("\tget [-rHp] [-d max] "
"[-o \"all\" | field[,...]]\n"
"\t [-t type[,...]] [-s source[,...]]\n"
"\t <\"all\" | property[,...]> "
"[filesystem|volume|snapshot|bookmark] ...\n"));
case HELP_INHERIT:
return (gettext("\tinherit [-rS] <property> "
"<filesystem|volume|snapshot> ...\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade [-v]\n"
"\tupgrade [-r] [-V version] <-a | filesystem ...>\n"));
case HELP_LIST:
return (gettext("\tlist [-Hp] [-r|-d max] [-o property[,...]] "
"[-s property]...\n\t [-S property]... [-t type[,...]] "
"[filesystem|volume|snapshot] ...\n"));
case HELP_MOUNT:
return (gettext("\tmount\n"
"\tmount [-flvO] [-o opts] <-a | filesystem>\n"));
case HELP_PROMOTE:
return (gettext("\tpromote <clone-filesystem>\n"));
case HELP_RECEIVE:
return (gettext("\treceive [-vMnsFhu] "
"[-o <property>=<value>] ... [-x <property>] ...\n"
"\t <filesystem|volume|snapshot>\n"
"\treceive [-vMnsFhu] [-o <property>=<value>] ... "
"[-x <property>] ... \n"
"\t [-d | -e] <filesystem>\n"
"\treceive -A <filesystem|volume>\n"));
case HELP_RENAME:
return (gettext("\trename [-f] <filesystem|volume|snapshot> "
"<filesystem|volume|snapshot>\n"
"\trename -p [-f] <filesystem|volume> <filesystem|volume>\n"
"\trename -u [-f] <filesystem> <filesystem>\n"
"\trename -r <snapshot> <snapshot>\n"));
case HELP_ROLLBACK:
return (gettext("\trollback [-rRf] <snapshot>\n"));
case HELP_SEND:
return (gettext("\tsend [-DnPpRvLecwhb] [-[i|I] snapshot] "
"<snapshot>\n"
"\tsend [-DnvPLecw] [-i snapshot|bookmark] "
"<filesystem|volume|snapshot>\n"
"\tsend [-DnPpvLec] [-i bookmark|snapshot] "
"--redact <bookmark> <snapshot>\n"
"\tsend [-nvPe] -t <receive_resume_token>\n"
"\tsend [-Pnv] --saved filesystem\n"));
case HELP_SET:
return (gettext("\tset <property=value> ... "
"<filesystem|volume|snapshot> ...\n"));
case HELP_SHARE:
return (gettext("\tshare [-l] <-a [nfs|smb] | filesystem>\n"));
case HELP_SNAPSHOT:
return (gettext("\tsnapshot [-r] [-o property=value] ... "
"<filesystem|volume>@<snap> ...\n"));
case HELP_UNMOUNT:
return (gettext("\tunmount [-fu] "
"<-a | filesystem|mountpoint>\n"));
case HELP_UNSHARE:
return (gettext("\tunshare "
"<-a [nfs|smb] | filesystem|mountpoint>\n"));
case HELP_ALLOW:
return (gettext("\tallow <filesystem|volume>\n"
"\tallow [-ldug] "
"<\"everyone\"|user|group>[,...] <perm|@setname>[,...]\n"
"\t <filesystem|volume>\n"
"\tallow [-ld] -e <perm|@setname>[,...] "
"<filesystem|volume>\n"
"\tallow -c <perm|@setname>[,...] <filesystem|volume>\n"
"\tallow -s @setname <perm|@setname>[,...] "
"<filesystem|volume>\n"));
case HELP_UNALLOW:
return (gettext("\tunallow [-rldug] "
"<\"everyone\"|user|group>[,...]\n"
"\t [<perm|@setname>[,...]] <filesystem|volume>\n"
"\tunallow [-rld] -e [<perm|@setname>[,...]] "
"<filesystem|volume>\n"
"\tunallow [-r] -c [<perm|@setname>[,...]] "
"<filesystem|volume>\n"
"\tunallow [-r] -s @setname [<perm|@setname>[,...]] "
"<filesystem|volume>\n"));
case HELP_USERSPACE:
return (gettext("\tuserspace [-Hinp] [-o field[,...]] "
"[-s field] ...\n"
"\t [-S field] ... [-t type[,...]] "
"<filesystem|snapshot|path>\n"));
case HELP_GROUPSPACE:
return (gettext("\tgroupspace [-Hinp] [-o field[,...]] "
"[-s field] ...\n"
"\t [-S field] ... [-t type[,...]] "
"<filesystem|snapshot|path>\n"));
case HELP_PROJECTSPACE:
return (gettext("\tprojectspace [-Hp] [-o field[,...]] "
"[-s field] ... \n"
"\t [-S field] ... <filesystem|snapshot|path>\n"));
case HELP_PROJECT:
return (gettext("\tproject [-d|-r] <directory|file ...>\n"
"\tproject -c [-0] [-d|-r] [-p id] <directory|file ...>\n"
"\tproject -C [-k] [-r] <directory ...>\n"
"\tproject [-p id] [-r] [-s] <directory ...>\n"));
case HELP_HOLD:
return (gettext("\thold [-r] <tag> <snapshot> ...\n"));
case HELP_HOLDS:
return (gettext("\tholds [-rH] <snapshot> ...\n"));
case HELP_RELEASE:
return (gettext("\trelease [-r] <tag> <snapshot> ...\n"));
case HELP_DIFF:
return (gettext("\tdiff [-FHt] <snapshot> "
"[snapshot|filesystem]\n"));
case HELP_BOOKMARK:
return (gettext("\tbookmark <snapshot|bookmark> "
"<newbookmark>\n"));
case HELP_CHANNEL_PROGRAM:
return (gettext("\tprogram [-jn] [-t <instruction limit>] "
"[-m <memory limit (b)>]\n"
"\t <pool> <program file> [lua args...]\n"));
case HELP_LOAD_KEY:
return (gettext("\tload-key [-rn] [-L <keylocation>] "
"<-a | filesystem|volume>\n"));
case HELP_UNLOAD_KEY:
return (gettext("\tunload-key [-r] "
"<-a | filesystem|volume>\n"));
case HELP_CHANGE_KEY:
return (gettext("\tchange-key [-l] [-o keyformat=<value>]\n"
"\t [-o keylocation=<value>] [-o pbkdf2iters=<value>]\n"
"\t <filesystem|volume>\n"
"\tchange-key -i [-l] <filesystem|volume>\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
case HELP_REDACT:
return (gettext("\tredact <snapshot> <bookmark> "
"<redaction_snapshot> ...\n"));
case HELP_JAIL:
return (gettext("\tjail <jailid|jailname> <filesystem>\n"));
case HELP_UNJAIL:
return (gettext("\tunjail <jailid|jailname> <filesystem>\n"));
case HELP_WAIT:
return (gettext("\twait [-t <activity>] <filesystem>\n"));
+ default:
+ __builtin_unreachable();
}
-
- abort();
- /* NOTREACHED */
}
void
nomem(void)
{
(void) fprintf(stderr, gettext("internal error: out of memory\n"));
exit(1);
}
/*
* Utility function to guarantee malloc() success.
*/
void *
safe_malloc(size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
nomem();
return (data);
}
static void *
safe_realloc(void *data, size_t size)
{
void *newp;
if ((newp = realloc(data, size)) == NULL) {
free(data);
nomem();
}
return (newp);
}
static char *
safe_strdup(char *str)
{
char *dupstr = strdup(str);
if (dupstr == NULL)
nomem();
return (dupstr);
}
/*
* Callback routine that will print out information for each of
* the properties.
*/
static int
usage_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-15s ", zfs_prop_to_name(prop));
if (zfs_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, "YES ");
if (zfs_prop_inheritable(prop))
(void) fprintf(fp, " YES ");
else
(void) fprintf(fp, " NO ");
if (zfs_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", zfs_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static void
usage(boolean_t requested)
{
int i;
boolean_t show_properties = B_FALSE;
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
(void) fprintf(fp, gettext("usage: zfs command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
(void) fprintf(fp, gettext("\nEach dataset is of the form: "
"pool/[dataset/]*dataset[@name]\n"));
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
(strcmp(current_command->name, "set") == 0 ||
strcmp(current_command->name, "get") == 0 ||
strcmp(current_command->name, "inherit") == 0 ||
strcmp(current_command->name, "list") == 0))
show_properties = B_TRUE;
if (show_properties) {
(void) fprintf(fp,
gettext("\nThe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-14s %s %s %s\n\n",
"PROPERTY", "EDIT", "INHERIT", "VALUES");
/* Iterate over all properties */
(void) zprop_iter(usage_prop_cb, fp, B_FALSE, B_TRUE,
ZFS_TYPE_DATASET);
(void) fprintf(fp, "\t%-15s ", "userused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "groupused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "projectused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "userobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "groupobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "projectobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "userquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "groupquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "projectquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "userobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "groupobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "projectobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "written@<snap>");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "written#<bookmark>");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, gettext("\nSizes are specified in bytes "
"with standard units such as K, M, G, etc.\n"));
(void) fprintf(fp, gettext("\nUser-defined properties can "
"be specified by using a name containing a colon (:).\n"));
(void) fprintf(fp, gettext("\nThe {user|group|project}"
"[obj]{used|quota}@ properties must be appended with\n"
"a user|group|project specifier of one of these forms:\n"
" POSIX name (eg: \"matt\")\n"
" POSIX id (eg: \"126829\")\n"
" SMB name@domain (eg: \"matt@sun\")\n"
" SMB SID (eg: \"S-1-234-567-89\")\n"));
} else {
(void) fprintf(fp,
gettext("\nFor the property list, run: %s\n"),
"zfs set|get");
(void) fprintf(fp,
gettext("\nFor the delegated permission list, run: %s\n"),
"zfs allow|unallow");
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
/*
* Take a property=value argument string and add it to the given nvlist.
* Modifies the argument in place.
*/
static boolean_t
parseprop(nvlist_t *props, char *propname)
{
char *propval;
if ((propval = strchr(propname, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for property=value argument\n"));
return (B_FALSE);
}
*propval = '\0';
propval++;
if (nvlist_exists(props, propname)) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (B_FALSE);
}
if (nvlist_add_string(props, propname, propval) != 0)
nomem();
return (B_TRUE);
}
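/*
 * Usage sketch: given an optarg of "compression=lz4" (hypothetical value),
 * parseprop() splits the string at '=' in place and adds the pair
 * ("compression" -> "lz4") to the props nvlist, rejecting duplicates.
 */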
/*
* Take a property name argument and add it to the given nvlist.
* Modifies the argument in place.
*/
static boolean_t
parsepropname(nvlist_t *props, char *propname)
{
if (strchr(propname, '=') != NULL) {
(void) fprintf(stderr, gettext("invalid character "
"'=' in property argument\n"));
return (B_FALSE);
}
if (nvlist_exists(props, propname)) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (B_FALSE);
}
if (nvlist_add_boolean(props, propname) != 0)
nomem();
return (B_TRUE);
}
static int
parse_depth(char *opt, int *flags)
{
char *tmp;
int depth;
depth = (int)strtol(opt, &tmp, 0);
if (*tmp) {
(void) fprintf(stderr,
gettext("%s is not an integer\n"), optarg);
usage(B_FALSE);
}
if (depth < 0) {
(void) fprintf(stderr,
gettext("Depth can not be negative.\n"));
usage(B_FALSE);
}
*flags |= (ZFS_ITER_DEPTH_LIMIT|ZFS_ITER_RECURSE);
return (depth);
}
#define PROGRESS_DELAY 2 /* seconds */
static char *pt_reverse = "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b";
static time_t pt_begin;
static char *pt_header = NULL;
static boolean_t pt_shown;
static void
start_progress_timer(void)
{
pt_begin = time(NULL) + PROGRESS_DELAY;
pt_shown = B_FALSE;
}
static void
set_progress_header(char *header)
{
assert(pt_header == NULL);
pt_header = safe_strdup(header);
if (pt_shown) {
(void) printf("%s: ", header);
(void) fflush(stdout);
}
}
static void
update_progress(char *update)
{
if (!pt_shown && time(NULL) > pt_begin) {
int len = strlen(update);
(void) printf("%s: %s%*.*s", pt_header, update, len, len,
pt_reverse);
(void) fflush(stdout);
pt_shown = B_TRUE;
} else if (pt_shown) {
int len = strlen(update);
(void) printf("%s%*.*s", update, len, len, pt_reverse);
(void) fflush(stdout);
}
}
static void
finish_progress(char *done)
{
if (pt_shown) {
(void) printf("%s\n", done);
(void) fflush(stdout);
}
free(pt_header);
pt_header = NULL;
}
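/*
 * Typical call sequence for these progress helpers (strings hypothetical):
 * start_progress_timer(); set_progress_header("Applying project id");
 * then update_progress("12345") per item and finish_progress("done.")
 * at the end. Nothing is printed until PROGRESS_DELAY seconds have passed.
 */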
/* This function checks if the passed fd refers to /dev/null or /dev/zero */
#ifdef __linux__
static boolean_t
is_dev_nullzero(int fd)
{
struct stat st;
fstat(fd, &st);
return (major(st.st_rdev) == 1 && (minor(st.st_rdev) == 3 /* null */ ||
minor(st.st_rdev) == 5 /* zero */));
}
#endif
static void
note_dev_error(int err, int fd)
{
#ifdef __linux__
if (err == EINVAL && is_dev_nullzero(fd)) {
(void) fprintf(stderr,
gettext("Error: Writing directly to /dev/{null,zero} files"
" on certain kernels is not currently implemented.\n"
"(As a workaround, "
"try \"zfs send [...] | cat > /dev/null\")\n"));
}
#endif
}
static int
zfs_mount_and_share(libzfs_handle_t *hdl, const char *dataset, zfs_type_t type)
{
zfs_handle_t *zhp = NULL;
int ret = 0;
zhp = zfs_open(hdl, dataset, type);
if (zhp == NULL)
return (1);
/*
* Volumes may neither be mounted nor shared. Potentially, in the
* future filesystems detected on these volumes could be mounted.
*/
if (zfs_get_type(zhp) == ZFS_TYPE_VOLUME) {
zfs_close(zhp);
return (0);
}
/*
* Mount and/or share the new filesystem as appropriate. We provide a
* verbose error message to let the user know that their filesystem was
* in fact created, even if we failed to mount or share it.
*
* If the user doesn't want the dataset automatically mounted, then
* skip the mount/share step.
*/
if (zfs_prop_valid_for_type(ZFS_PROP_CANMOUNT, type, B_FALSE) &&
zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_ON) {
if (zfs_mount_delegation_check()) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but it may only be "
"mounted by root\n"));
ret = 1;
} else if (zfs_mount(zhp, NULL, 0) != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but not mounted\n"));
ret = 1;
} else if (zfs_share(zhp) != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but not shared\n"));
ret = 1;
}
zfs_commit_all_shares();
}
zfs_close(zhp);
return (ret);
}
/*
* zfs clone [-p] [-o prop=value] ... <snap> <fs | vol>
*
* Given an existing dataset, create a writable copy whose initial contents
* are the same as the source. The newly created dataset maintains a
* dependency on the original; the original cannot be destroyed so long as
* the clone exists.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*/
static int
zfs_do_clone(int argc, char **argv)
{
zfs_handle_t *zhp = NULL;
boolean_t parents = B_FALSE;
nvlist_t *props;
int ret = 0;
int c;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, "o:p")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
return (1);
}
break;
case 'p':
parents = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
goto usage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
goto usage;
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
goto usage;
}
/* open the source dataset */
if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL) {
nvlist_free(props);
return (1);
}
if (parents && zfs_name_valid(argv[1], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) {
/*
* Now create the ancestors of the target dataset. If the
* target already exists and '-p' option was used we should not
* complain.
*/
if (zfs_dataset_exists(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) {
zfs_close(zhp);
nvlist_free(props);
return (0);
}
if (zfs_create_ancestors(g_zfs, argv[1]) != 0) {
zfs_close(zhp);
nvlist_free(props);
return (1);
}
}
/* pass to libzfs */
ret = zfs_clone(zhp, argv[1], props);
/* create the mountpoint if necessary */
if (ret == 0) {
if (log_history) {
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
ret = zfs_mount_and_share(g_zfs, argv[1], ZFS_TYPE_DATASET);
}
zfs_close(zhp);
nvlist_free(props);
return (!!ret);
usage:
ASSERT3P(zhp, ==, NULL);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
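/*
 * Illustrative invocations (hypothetical names):
 *
 *	zfs clone tank/fs@snap tank/clone
 *	zfs clone -p -o mountpoint=/mnt/clone tank/fs@snap tank/new/clone
 *
 * The second form creates the missing "tank/new" ancestor first and
 * applies the property at creation time.
 */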
/*
* Return a default volblocksize for the pool which always uses more than
* half of the data sectors. This primarily applies to dRAID which always
* writes full stripe widths.
*/
static uint64_t
default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
{
uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
nvlist_t *tree, **vdevs;
uint_t nvdevs;
nvlist_t *config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&vdevs, &nvdevs) != 0) {
return (ZVOL_DEFAULT_BLOCKSIZE);
}
for (int i = 0; i < nvdevs; i++) {
nvlist_t *nv = vdevs[i];
uint64_t ashift, ndata, nparity;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &ashift) != 0)
continue;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA,
&ndata) == 0) {
/* dRAID minimum allocation width */
asize = MAX(asize, ndata * (1ULL << ashift));
} else if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
&nparity) == 0) {
/* raidz minimum allocation width */
if (nparity == 1)
asize = MAX(asize, 2 * (1ULL << ashift));
else
asize = MAX(asize, 4 * (1ULL << ashift));
} else {
/* mirror or (non-redundant) leaf vdev */
asize = MAX(asize, 1ULL << ashift);
}
}
/*
* Calculate the target volblocksize such that more than half
* of the asize is used. The following table is for 4k sectors.
*
* n asize blksz used | n asize blksz used
* -------------------------+---------------------------------
* 1 4,096 8,192 100% | 9 36,864 32,768 88%
* 2 8,192 8,192 100% | 10 40,960 32,768 80%
* 3 12,288 8,192 66% | 11 45,056 32,768 72%
* 4 16,384 16,384 100% | 12 49,152 32,768 66%
* 5 20,480 16,384 80% | 13 53,248 32,768 61%
* 6 24,576 16,384 66% | 14 57,344 32,768 57%
* 7 28,672 16,384 57% | 15 61,440 32,768 53%
* 8 32,768 32,768 100% | 16 65,536 65,536 100%
*
* This is primarily a concern for dRAID which always allocates
* a full stripe width. For dRAID the default stripe width is
* n=8 in which case the volblocksize is set to 32k. Ignoring
* compression there are no unused sectors. This same reasoning
* applies to raidz[2,3] so target 4 sectors to minimize waste.
*/
uint64_t tgt_volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
while (tgt_volblocksize * 2 <= asize)
tgt_volblocksize *= 2;
const char *prop = zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE);
if (nvlist_lookup_uint64(props, prop, &volblocksize) == 0) {
/* Issue a warning when a non-optimal size is requested. */
if (volblocksize < ZVOL_DEFAULT_BLOCKSIZE) {
(void) fprintf(stderr, gettext("Warning: "
"volblocksize (%llu) is less than the default "
"minimum block size (%llu).\nTo reduce wasted "
"space a volblocksize of %llu is recommended.\n"),
(u_longlong_t)volblocksize,
(u_longlong_t)ZVOL_DEFAULT_BLOCKSIZE,
(u_longlong_t)tgt_volblocksize);
} else if (volblocksize < tgt_volblocksize) {
(void) fprintf(stderr, gettext("Warning: "
"volblocksize (%llu) is much less than the "
"minimum allocation\nunit (%llu), which wastes "
"at least %llu%% of space. To reduce wasted "
"space,\nuse a larger volblocksize (%llu is "
"recommended), fewer dRAID data disks\n"
"per group, or smaller sector size (ashift).\n"),
(u_longlong_t)volblocksize, (u_longlong_t)asize,
(u_longlong_t)((100 * (asize - volblocksize)) /
asize), (u_longlong_t)tgt_volblocksize);
}
} else {
volblocksize = tgt_volblocksize;
fnvlist_add_uint64(props, prop, volblocksize);
}
return (volblocksize);
}
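/*
 * Worked example of the sizing above (hypothetical pool layout), assuming
 * ZVOL_DEFAULT_BLOCKSIZE is 8K as the table's first row suggests: a dRAID
 * vdev with ndata = 8 and ashift = 12 gives asize = 8 * 4096 = 32,768, so
 * the loop doubles tgt_volblocksize from 8K up to 32K, matching the n = 8
 * row. A plain mirror with ashift = 12 leaves asize at 4,096 and the 8K
 * default stands.
 */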
/*
* zfs create [-Pnpv] [-o prop=value] ... fs
* zfs create [-Pnpsv] [-b blocksize] [-o prop=value] ... -V vol size
*
* Create a new dataset. This command can be used to create filesystems
* and volumes. Snapshot creation is handled by 'zfs snapshot'.
* For volumes, the user must specify a size to be used.
*
* The '-s' flag applies only to volumes, and indicates that we should not try
* to set the reservation for this volume. By default we set a reservation
* equal to the size for any volume. For pools with SPA_VERSION >=
* SPA_VERSION_REFRESERVATION, we set a refreservation instead.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*
* The '-n' flag is no-op (dry run) mode. This will perform a user-space sanity
* check of arguments and properties, but does not check for permissions,
* available space, etc.
*
* The '-u' flag prevents the newly created file system from being mounted.
*
* The '-v' flag is for verbose output.
*
* The '-P' flag is used for parseable output. It implies '-v'.
*/
static int
zfs_do_create(int argc, char **argv)
{
zfs_type_t type = ZFS_TYPE_FILESYSTEM;
zpool_handle_t *zpool_handle = NULL;
nvlist_t *real_props = NULL;
uint64_t volsize = 0;
int c;
boolean_t noreserve = B_FALSE;
boolean_t bflag = B_FALSE;
boolean_t parents = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t nomount = B_FALSE;
boolean_t verbose = B_FALSE;
boolean_t parseable = B_FALSE;
int ret = 1;
nvlist_t *props;
uint64_t intval;
char *strval;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":PV:b:nso:puv")) != -1) {
switch (c) {
case 'V':
type = ZFS_TYPE_VOLUME;
if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
(void) fprintf(stderr, gettext("bad volume "
"size '%s': %s\n"), optarg,
libzfs_error_description(g_zfs));
goto error;
}
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), intval) != 0)
nomem();
volsize = intval;
break;
case 'P':
verbose = B_TRUE;
parseable = B_TRUE;
break;
case 'p':
parents = B_TRUE;
break;
case 'b':
bflag = B_TRUE;
if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
(void) fprintf(stderr, gettext("bad volume "
"block size '%s': %s\n"), optarg,
libzfs_error_description(g_zfs));
goto error;
}
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
intval) != 0)
nomem();
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if (!parseprop(props, optarg))
goto error;
break;
case 's':
noreserve = B_TRUE;
break;
case 'u':
nomount = B_TRUE;
break;
case 'v':
verbose = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing size "
"argument\n"));
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
if ((bflag || noreserve) && type != ZFS_TYPE_VOLUME) {
(void) fprintf(stderr, gettext("'-s' and '-b' can only be "
"used when creating a volume\n"));
goto badusage;
}
if (nomount && type != ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("'-u' can only be "
"used when creating a filesystem\n"));
goto badusage;
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc == 0) {
(void) fprintf(stderr, gettext("missing %s argument\n"),
zfs_type_to_name(type));
goto badusage;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
goto badusage;
}
if (dryrun || type == ZFS_TYPE_VOLUME) {
char msg[ZFS_MAX_DATASET_NAME_LEN * 2];
char *p;
if ((p = strchr(argv[0], '/')) != NULL)
*p = '\0';
zpool_handle = zpool_open(g_zfs, argv[0]);
if (p != NULL)
*p = '/';
if (zpool_handle == NULL)
goto error;
(void) snprintf(msg, sizeof (msg),
dryrun ? gettext("cannot verify '%s'") :
gettext("cannot create '%s'"), argv[0]);
if (props && (real_props = zfs_valid_proplist(g_zfs, type,
props, 0, NULL, zpool_handle, B_TRUE, msg)) == NULL) {
zpool_close(zpool_handle);
goto error;
}
}
if (type == ZFS_TYPE_VOLUME) {
const char *prop = zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE);
uint64_t volblocksize = default_volblocksize(zpool_handle,
real_props);
if (volblocksize != ZVOL_DEFAULT_BLOCKSIZE &&
nvlist_lookup_string(props, prop, &strval) != 0) {
if (asprintf(&strval, "%llu",
(u_longlong_t)volblocksize) == -1)
nomem();
nvlist_add_string(props, prop, strval);
free(strval);
}
/*
* If volsize is not a multiple of volblocksize, round it
* up to the nearest multiple of the volblocksize.
*/
if (volsize % volblocksize) {
volsize = P2ROUNDUP_TYPED(volsize, volblocksize,
uint64_t);
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), volsize) != 0) {
nvlist_free(props);
nomem();
}
}
}
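/*
 * Rounding sketch (hypothetical numbers): with volblocksize = 32,768 a
 * requested volsize of 1,000,000 bytes is not a multiple, so P2ROUNDUP
 * raises it to 31 * 32,768 = 1,015,808 before the property is stored.
 */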
if (type == ZFS_TYPE_VOLUME && !noreserve) {
uint64_t spa_version;
zfs_prop_t resv_prop;
spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
if (spa_version >= SPA_VERSION_REFRESERVATION)
resv_prop = ZFS_PROP_REFRESERVATION;
else
resv_prop = ZFS_PROP_RESERVATION;
volsize = zvol_volsize_to_reservation(zpool_handle, volsize,
real_props);
if (nvlist_lookup_string(props, zfs_prop_to_name(resv_prop),
&strval) != 0) {
if (nvlist_add_uint64(props,
zfs_prop_to_name(resv_prop), volsize) != 0) {
nvlist_free(props);
nomem();
}
}
}
if (zpool_handle != NULL) {
zpool_close(zpool_handle);
nvlist_free(real_props);
}
if (parents && zfs_name_valid(argv[0], type)) {
/*
* Now create the ancestors of the target dataset. If the target
* already exists and the '-p' option was used we should not
* complain.
*/
if (zfs_dataset_exists(g_zfs, argv[0], type)) {
ret = 0;
goto error;
}
if (verbose) {
(void) printf(parseable ? "create_ancestors\t%s\n" :
dryrun ? "would create ancestors of %s\n" :
"create ancestors of %s\n", argv[0]);
}
if (!dryrun) {
if (zfs_create_ancestors(g_zfs, argv[0]) != 0) {
goto error;
}
}
}
if (verbose) {
nvpair_t *nvp = NULL;
(void) printf(parseable ? "create\t%s\n" :
dryrun ? "would create %s\n" : "create %s\n", argv[0]);
while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) {
uint64_t uval;
char *sval;
switch (nvpair_type(nvp)) {
case DATA_TYPE_UINT64:
VERIFY0(nvpair_value_uint64(nvp, &uval));
(void) printf(parseable ?
"property\t%s\t%llu\n" : "\t%s=%llu\n",
nvpair_name(nvp), (u_longlong_t)uval);
break;
case DATA_TYPE_STRING:
VERIFY0(nvpair_value_string(nvp, &sval));
(void) printf(parseable ?
"property\t%s\t%s\n" : "\t%s=%s\n",
nvpair_name(nvp), sval);
break;
default:
(void) fprintf(stderr, "property '%s' "
"has illegal type %d\n",
nvpair_name(nvp), nvpair_type(nvp));
abort();
}
}
}
if (dryrun) {
ret = 0;
goto error;
}
/* pass to libzfs */
if (zfs_create(g_zfs, argv[0], type, props) != 0)
goto error;
if (log_history) {
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (nomount) {
ret = 0;
goto error;
}
ret = zfs_mount_and_share(g_zfs, argv[0], ZFS_TYPE_DATASET);
error:
nvlist_free(props);
return (ret);
badusage:
nvlist_free(props);
usage(B_FALSE);
return (2);
}
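/*
 * Illustrative invocations of the paths above (hypothetical names):
 *
 *	zfs create -p -o compression=lz4 tank/a/b
 *	zfs create -n -P -V 10G tank/vol
 *	zfs create -s -b 64K -V 10G tank/sparse
 *
 * The first creates missing ancestors, the second is a parseable dry run
 * of a volume, and the third skips the (ref)reservation and forces a 64K
 * volblocksize.
 */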
/*
* zfs destroy [-rRf] <fs, vol>
* zfs destroy [-rRd] <snap>
*
* -r Recursively destroy all children
* -R Recursively destroy all dependents, including clones
* -f Force unmounting of any dependents
* -d If we can't destroy now, mark for deferred destruction
*
* Destroys the given dataset. By default, it will unmount any filesystems,
* and refuse to destroy a dataset that has any dependents. A dependent can
* either be a child, or a clone of a child.
*/
typedef struct destroy_cbdata {
boolean_t cb_first;
boolean_t cb_force;
boolean_t cb_recurse;
boolean_t cb_error;
boolean_t cb_doclones;
zfs_handle_t *cb_target;
boolean_t cb_defer_destroy;
boolean_t cb_verbose;
boolean_t cb_parsable;
boolean_t cb_dryrun;
nvlist_t *cb_nvl;
nvlist_t *cb_batchedsnaps;
/* first snap in contiguous run */
char *cb_firstsnap;
/* previous snap in contiguous run */
char *cb_prevsnap;
int64_t cb_snapused;
char *cb_snapspec;
char *cb_bookmark;
uint64_t cb_snap_count;
} destroy_cbdata_t;
/*
* Check for any dependents based on the '-r' or '-R' flags.
*/
static int
destroy_check_dependent(zfs_handle_t *zhp, void *data)
{
destroy_cbdata_t *cbp = data;
const char *tname = zfs_get_name(cbp->cb_target);
const char *name = zfs_get_name(zhp);
if (strncmp(tname, name, strlen(tname)) == 0 &&
(name[strlen(tname)] == '/' || name[strlen(tname)] == '@')) {
/*
* This is a direct descendant, not a clone somewhere else in
* the hierarchy.
*/
if (cbp->cb_recurse)
goto out;
if (cbp->cb_first) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"%s has children\n"),
zfs_get_name(cbp->cb_target),
zfs_type_to_name(zfs_get_type(cbp->cb_target)));
(void) fprintf(stderr, gettext("use '-r' to destroy "
"the following datasets:\n"));
cbp->cb_first = B_FALSE;
cbp->cb_error = B_TRUE;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
} else {
/*
* This is a clone. We only want to report this if the '-r'
* wasn't specified, or the target is a snapshot.
*/
if (!cbp->cb_recurse &&
zfs_get_type(cbp->cb_target) != ZFS_TYPE_SNAPSHOT)
goto out;
if (cbp->cb_first) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"%s has dependent clones\n"),
zfs_get_name(cbp->cb_target),
zfs_type_to_name(zfs_get_type(cbp->cb_target)));
(void) fprintf(stderr, gettext("use '-R' to destroy "
"the following datasets:\n"));
cbp->cb_first = B_FALSE;
cbp->cb_error = B_TRUE;
cbp->cb_dryrun = B_TRUE;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
}
out:
zfs_close(zhp);
return (0);
}
static int
destroy_batched(destroy_cbdata_t *cb)
{
int error = zfs_destroy_snaps_nvl(g_zfs,
cb->cb_batchedsnaps, B_FALSE);
fnvlist_free(cb->cb_batchedsnaps);
cb->cb_batchedsnaps = fnvlist_alloc();
return (error);
}
static int
destroy_callback(zfs_handle_t *zhp, void *data)
{
destroy_cbdata_t *cb = data;
const char *name = zfs_get_name(zhp);
int error;
if (cb->cb_verbose) {
if (cb->cb_parsable) {
(void) printf("destroy\t%s\n", name);
} else if (cb->cb_dryrun) {
(void) printf(gettext("would destroy %s\n"),
name);
} else {
(void) printf(gettext("will destroy %s\n"),
name);
}
}
/*
* Ignore pools (which we've already flagged as an error before getting
* here).
*/
if (strchr(zfs_get_name(zhp), '/') == NULL &&
zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
zfs_close(zhp);
return (0);
}
if (cb->cb_dryrun) {
zfs_close(zhp);
return (0);
}
/*
* We batch up all contiguous snapshots (even of different
* filesystems) and destroy them with one ioctl. We can't
* simply do all snap deletions and then all fs deletions,
* because we must delete a clone before its origin.
*/
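/*
 * For example (hypothetical names), a recursive destroy may visit
 * tank/a@s1, tank/a@s2 and then tank/a itself: the two snapshots are
 * accumulated in cb_batchedsnaps, and the filesystem case below first
 * flushes that batch via destroy_batched() before unmounting and
 * destroying tank/a, so deletions stay in iteration order and a clone
 * is still removed before its origin.
 */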
if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT) {
cb->cb_snap_count++;
fnvlist_add_boolean(cb->cb_batchedsnaps, name);
if (cb->cb_snap_count % 10 == 0 && cb->cb_defer_destroy)
error = destroy_batched(cb);
} else {
error = destroy_batched(cb);
if (error != 0 ||
zfs_unmount(zhp, NULL, cb->cb_force ? MS_FORCE : 0) != 0 ||
zfs_destroy(zhp, cb->cb_defer_destroy) != 0) {
zfs_close(zhp);
/*
* When performing a recursive destroy we ignore errors
* so that the recursive destroy can continue
* destroying past problem datasets.
*/
if (cb->cb_recurse) {
cb->cb_error = B_TRUE;
return (0);
}
return (-1);
}
}
zfs_close(zhp);
return (0);
}
static int
destroy_print_cb(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
const char *name = zfs_get_name(zhp);
int err = 0;
if (nvlist_exists(cb->cb_nvl, name)) {
if (cb->cb_firstsnap == NULL)
cb->cb_firstsnap = strdup(name);
if (cb->cb_prevsnap != NULL)
free(cb->cb_prevsnap);
/* this snap continues the current range */
cb->cb_prevsnap = strdup(name);
if (cb->cb_firstsnap == NULL || cb->cb_prevsnap == NULL)
nomem();
if (cb->cb_verbose) {
if (cb->cb_parsable) {
(void) printf("destroy\t%s\n", name);
} else if (cb->cb_dryrun) {
(void) printf(gettext("would destroy %s\n"),
name);
} else {
(void) printf(gettext("will destroy %s\n"),
name);
}
}
} else if (cb->cb_firstsnap != NULL) {
/* end of this range */
uint64_t used = 0;
err = lzc_snaprange_space(cb->cb_firstsnap,
cb->cb_prevsnap, &used);
cb->cb_snapused += used;
free(cb->cb_firstsnap);
cb->cb_firstsnap = NULL;
free(cb->cb_prevsnap);
cb->cb_prevsnap = NULL;
}
zfs_close(zhp);
return (err);
}
static int
destroy_print_snapshots(zfs_handle_t *fs_zhp, destroy_cbdata_t *cb)
{
int err;
assert(cb->cb_firstsnap == NULL);
assert(cb->cb_prevsnap == NULL);
err = zfs_iter_snapshots_sorted(fs_zhp, destroy_print_cb, cb, 0, 0);
if (cb->cb_firstsnap != NULL) {
uint64_t used = 0;
if (err == 0) {
err = lzc_snaprange_space(cb->cb_firstsnap,
cb->cb_prevsnap, &used);
}
cb->cb_snapused += used;
free(cb->cb_firstsnap);
cb->cb_firstsnap = NULL;
free(cb->cb_prevsnap);
cb->cb_prevsnap = NULL;
}
return (err);
}
static int
snapshot_to_nvl_cb(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
int err = 0;
/* Check for clones. */
if (!cb->cb_doclones && !cb->cb_defer_destroy) {
cb->cb_target = zhp;
cb->cb_first = B_TRUE;
err = zfs_iter_dependents(zhp, B_TRUE,
destroy_check_dependent, cb);
}
if (err == 0) {
if (nvlist_add_boolean(cb->cb_nvl, zfs_get_name(zhp)))
nomem();
}
zfs_close(zhp);
return (err);
}
static int
gather_snapshots(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
int err = 0;
err = zfs_iter_snapspec(zhp, cb->cb_snapspec, snapshot_to_nvl_cb, cb);
if (err == ENOENT)
err = 0;
if (err != 0)
goto out;
if (cb->cb_verbose) {
err = destroy_print_snapshots(zhp, cb);
if (err != 0)
goto out;
}
if (cb->cb_recurse)
err = zfs_iter_filesystems(zhp, gather_snapshots, cb);
out:
zfs_close(zhp);
return (err);
}
static int
destroy_clones(destroy_cbdata_t *cb)
{
nvpair_t *pair;
for (pair = nvlist_next_nvpair(cb->cb_nvl, NULL);
pair != NULL;
pair = nvlist_next_nvpair(cb->cb_nvl, pair)) {
zfs_handle_t *zhp = zfs_open(g_zfs, nvpair_name(pair),
ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
boolean_t defer = cb->cb_defer_destroy;
int err;
/*
* We can't defer destroy non-snapshots, so set it to
* false while destroying the clones.
*/
cb->cb_defer_destroy = B_FALSE;
err = zfs_iter_dependents(zhp, B_FALSE,
destroy_callback, cb);
cb->cb_defer_destroy = defer;
zfs_close(zhp);
if (err != 0)
return (err);
}
}
return (0);
}
static int
zfs_do_destroy(int argc, char **argv)
{
destroy_cbdata_t cb = { 0 };
int rv = 0;
int err = 0;
int c;
zfs_handle_t *zhp = NULL;
char *at, *pound;
zfs_type_t type = ZFS_TYPE_DATASET;
/* check options */
while ((c = getopt(argc, argv, "vpndfrR")) != -1) {
switch (c) {
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'p':
cb.cb_verbose = B_TRUE;
cb.cb_parsable = B_TRUE;
break;
case 'n':
cb.cb_dryrun = B_TRUE;
break;
case 'd':
cb.cb_defer_destroy = B_TRUE;
type = ZFS_TYPE_SNAPSHOT;
break;
case 'f':
cb.cb_force = B_TRUE;
break;
case 'r':
cb.cb_recurse = B_TRUE;
break;
case 'R':
cb.cb_recurse = B_TRUE;
cb.cb_doclones = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc == 0) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
at = strchr(argv[0], '@');
pound = strchr(argv[0], '#');
if (at != NULL) {
/* Build the list of snaps to destroy in cb_nvl. */
cb.cb_nvl = fnvlist_alloc();
*at = '\0';
zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
nvlist_free(cb.cb_nvl);
return (1);
}
cb.cb_snapspec = at + 1;
if (gather_snapshots(zfs_handle_dup(zhp), &cb) != 0 ||
cb.cb_error) {
rv = 1;
goto out;
}
if (nvlist_empty(cb.cb_nvl)) {
(void) fprintf(stderr, gettext("could not find any "
"snapshots to destroy; check snapshot names.\n"));
rv = 1;
goto out;
}
if (cb.cb_verbose) {
char buf[16];
zfs_nicebytes(cb.cb_snapused, buf, sizeof (buf));
if (cb.cb_parsable) {
(void) printf("reclaim\t%llu\n",
(u_longlong_t)cb.cb_snapused);
} else if (cb.cb_dryrun) {
(void) printf(gettext("would reclaim %s\n"),
buf);
} else {
(void) printf(gettext("will reclaim %s\n"),
buf);
}
}
if (!cb.cb_dryrun) {
if (cb.cb_doclones) {
cb.cb_batchedsnaps = fnvlist_alloc();
err = destroy_clones(&cb);
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs,
cb.cb_batchedsnaps, B_FALSE);
}
if (err != 0) {
rv = 1;
goto out;
}
}
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs, cb.cb_nvl,
cb.cb_defer_destroy);
}
}
if (err != 0)
rv = 1;
} else if (pound != NULL) {
int err;
nvlist_t *nvl;
if (cb.cb_dryrun) {
(void) fprintf(stderr,
"dryrun is not supported with bookmark\n");
return (-1);
}
if (cb.cb_defer_destroy) {
(void) fprintf(stderr,
"defer destroy is not supported with bookmark\n");
return (-1);
}
if (cb.cb_recurse) {
(void) fprintf(stderr,
"recursive is not supported with bookmark\n");
return (-1);
}
/*
* Unfortunately, zfs_bookmark() doesn't honor the
* casesensitivity setting. However, we can't simply
* remove this check, because lzc_destroy_bookmarks()
* ignores non-existent bookmarks, so this is necessary
* to get a proper error message.
*/
if (!zfs_bookmark_exists(argv[0])) {
(void) fprintf(stderr, gettext("bookmark '%s' "
"does not exist.\n"), argv[0]);
return (1);
}
nvl = fnvlist_alloc();
fnvlist_add_boolean(nvl, argv[0]);
err = lzc_destroy_bookmarks(nvl, NULL);
if (err != 0) {
(void) zfs_standard_error(g_zfs, err,
"cannot destroy bookmark");
}
nvlist_free(nvl);
return (err);
} else {
/* Open the given dataset */
if ((zhp = zfs_open(g_zfs, argv[0], type)) == NULL)
return (1);
cb.cb_target = zhp;
/*
* Perform an explicit check for pools before going any further.
*/
if (!cb.cb_recurse && strchr(zfs_get_name(zhp), '/') == NULL &&
zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"operation does not apply to pools\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use 'zfs destroy -r "
"%s' to destroy all datasets in the pool\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use 'zpool destroy %s' "
"to destroy the pool itself\n"), zfs_get_name(zhp));
rv = 1;
goto out;
}
/*
* Check for any dependents and/or clones.
*/
cb.cb_first = B_TRUE;
if (!cb.cb_doclones &&
zfs_iter_dependents(zhp, B_TRUE, destroy_check_dependent,
&cb) != 0) {
rv = 1;
goto out;
}
if (cb.cb_error) {
rv = 1;
goto out;
}
cb.cb_batchedsnaps = fnvlist_alloc();
if (zfs_iter_dependents(zhp, B_FALSE, destroy_callback,
&cb) != 0) {
rv = 1;
goto out;
}
/*
* Do the real thing. The callback will close the
* handle regardless of whether it succeeds or not.
*/
err = destroy_callback(zhp, &cb);
zhp = NULL;
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs,
cb.cb_batchedsnaps, cb.cb_defer_destroy);
}
if (err != 0 || cb.cb_error == B_TRUE)
rv = 1;
}
out:
fnvlist_free(cb.cb_batchedsnaps);
fnvlist_free(cb.cb_nvl);
if (zhp != NULL)
zfs_close(zhp);
return (rv);
}
static boolean_t
is_recvd_column(zprop_get_cbdata_t *cbp)
{
int i;
zfs_get_column_t col;
for (i = 0; i < ZFS_GET_NCOLS &&
(col = cbp->cb_columns[i]) != GET_COL_NONE; i++)
if (col == GET_COL_RECVD)
return (B_TRUE);
return (B_FALSE);
}
/*
* zfs get [-rHp] [-o all | field[,field]...] [-s source[,source]...]
* < all | property[,property]... > < fs | snap | vol > ...
*
* -r recurse over any child datasets
* -H scripted mode. Headers are stripped, and fields are separated
* by tabs instead of spaces.
* -o Set of fields to display. One of "name,property,value,
* received,source". Default is "name,property,value,source".
* "all" is an alias for all five.
* -s Set of sources to allow. One of
* "local,default,inherited,received,temporary,none". Default is
* all six.
* -p Display values in parsable (literal) format.
*
* Prints properties for the given datasets. The user can control which
* columns to display as well as which property types to allow.
*/
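/*
 * Illustrative invocations (hypothetical names):
 *
 *	zfs get -r -t snapshot used,referenced tank
 *	zfs get -Hp -o name,value -s local,received all tank/fs
 *
 * The first recurses and restricts the type; the second strips headers,
 * prints literal values, limits the columns, and filters by source.
 */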
/*
* Invoked to display the properties for a single dataset.
*/
static int
get_callback(zfs_handle_t *zhp, void *data)
{
char buf[ZFS_MAXPROPLEN];
char rbuf[ZFS_MAXPROPLEN];
zprop_source_t sourcetype;
char source[ZFS_MAX_DATASET_NAME_LEN];
zprop_get_cbdata_t *cbp = data;
nvlist_t *user_props = zfs_get_user_props(zhp);
zprop_list_t *pl = cbp->cb_proplist;
nvlist_t *propval;
char *strval;
char *sourceval;
boolean_t received = is_recvd_column(cbp);
for (; pl != NULL; pl = pl->pl_next) {
char *recvdval = NULL;
/*
* Skip the special fake placeholder. This will also skip over
* the name property when 'all' is specified.
*/
if (pl->pl_prop == ZFS_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, pl->pl_prop, buf,
sizeof (buf), &sourcetype, source,
sizeof (source),
cbp->cb_literal) != 0) {
if (pl->pl_all)
continue;
if (!zfs_prop_valid_for_type(pl->pl_prop,
ZFS_TYPE_DATASET, B_FALSE)) {
(void) fprintf(stderr,
gettext("No such property '%s'\n"),
zfs_prop_to_name(pl->pl_prop));
continue;
}
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
if (received && (zfs_prop_get_recvd(zhp,
zfs_prop_to_name(pl->pl_prop), rbuf, sizeof (rbuf),
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
zfs_prop_to_name(pl->pl_prop),
buf, sourcetype, source, recvdval);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
buf, sizeof (buf), cbp->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
} else if (zfs_prop_written(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
buf, sizeof (buf), cbp->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
} else {
if (nvlist_lookup_nvlist(user_props,
pl->pl_user_prop, &propval) != 0) {
if (pl->pl_all)
continue;
sourcetype = ZPROP_SRC_NONE;
strval = "-";
} else {
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &strval) == 0);
verify(nvlist_lookup_string(propval,
ZPROP_SOURCE, &sourceval) == 0);
if (strcmp(sourceval,
zfs_get_name(zhp)) == 0) {
sourcetype = ZPROP_SRC_LOCAL;
} else if (strcmp(sourceval,
ZPROP_SOURCE_VAL_RECVD) == 0) {
sourcetype = ZPROP_SRC_RECEIVED;
} else {
sourcetype = ZPROP_SRC_INHERITED;
(void) strlcpy(source,
sourceval, sizeof (source));
}
}
if (received && (zfs_prop_get_recvd(zhp,
pl->pl_user_prop, rbuf, sizeof (rbuf),
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, strval, sourcetype,
source, recvdval);
}
}
return (0);
}
static int
zfs_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
int i, c, flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
int types = ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK;
char *value, *fields;
int ret = 0;
int limit = 0;
zprop_list_t fake_name = { 0 };
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_DATASET;
/* check options */
while ((c = getopt(argc, argv, ":d:o:s:rt:Hp")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'd':
limit = parse_depth(optarg, &flags);
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case 'o':
/*
* Process the set of columns to display. We zero out
* the structure to give us a blank slate.
*/
bzero(&cb.cb_columns, sizeof (cb.cb_columns));
i = 0;
while (*optarg != '\0') {
static char *col_subopts[] =
{ "name", "property", "value", "received",
"source", "all", NULL };
if (i == ZFS_GET_NCOLS) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
switch (getsubopt(&optarg, col_subopts,
&value)) {
case 0:
cb.cb_columns[i++] = GET_COL_NAME;
break;
case 1:
cb.cb_columns[i++] = GET_COL_PROPERTY;
break;
case 2:
cb.cb_columns[i++] = GET_COL_VALUE;
break;
case 3:
cb.cb_columns[i++] = GET_COL_RECVD;
flags |= ZFS_ITER_RECVD_PROPS;
break;
case 4:
cb.cb_columns[i++] = GET_COL_SOURCE;
break;
case 5:
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_RECVD;
cb.cb_columns[4] = GET_COL_SOURCE;
flags |= ZFS_ITER_RECVD_PROPS;
i = ZFS_GET_NCOLS;
break;
default:
(void) fprintf(stderr,
gettext("invalid column name "
"'%s'\n"), value);
usage(B_FALSE);
}
}
break;
case 's':
cb.cb_sources = 0;
while (*optarg != '\0') {
static char *source_subopts[] = {
"local", "default", "inherited",
"received", "temporary", "none",
NULL };
switch (getsubopt(&optarg, source_subopts,
&value)) {
case 0:
cb.cb_sources |= ZPROP_SRC_LOCAL;
break;
case 1:
cb.cb_sources |= ZPROP_SRC_DEFAULT;
break;
case 2:
cb.cb_sources |= ZPROP_SRC_INHERITED;
break;
case 3:
cb.cb_sources |= ZPROP_SRC_RECEIVED;
break;
case 4:
cb.cb_sources |= ZPROP_SRC_TEMPORARY;
break;
case 5:
cb.cb_sources |= ZPROP_SRC_NONE;
break;
default:
(void) fprintf(stderr,
gettext("invalid source "
"'%s'\n"), value);
usage(B_FALSE);
}
}
break;
case 't':
types = 0;
flags &= ~ZFS_ITER_PROP_LISTSNAPS;
while (*optarg != '\0') {
static char *type_subopts[] = { "filesystem",
"volume", "snapshot", "snap", "bookmark",
"all", NULL };
switch (getsubopt(&optarg, type_subopts,
&value)) {
case 0:
types |= ZFS_TYPE_FILESYSTEM;
break;
case 1:
types |= ZFS_TYPE_VOLUME;
break;
case 2:
case 3:
types |= ZFS_TYPE_SNAPSHOT;
break;
case 4:
types |= ZFS_TYPE_BOOKMARK;
break;
case 5:
types = ZFS_TYPE_DATASET |
ZFS_TYPE_BOOKMARK;
break;
default:
(void) fprintf(stderr,
gettext("invalid type '%s'\n"),
value);
usage(B_FALSE);
}
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
fields = argv[0];
/*
* Handle users who want to get all snapshots or bookmarks
* of a dataset (ex. 'zfs get -t snapshot refer <dataset>').
*/
if ((types == ZFS_TYPE_SNAPSHOT || types == ZFS_TYPE_BOOKMARK) &&
argc > 1 && (flags & ZFS_ITER_RECURSE) == 0 && limit == 0) {
flags |= (ZFS_ITER_DEPTH_LIMIT | ZFS_ITER_RECURSE);
limit = 1;
}
if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
!= 0)
usage(B_FALSE);
argc--;
argv++;
/*
* As part of zfs_expand_proplist(), we keep track of the maximum column
* width for each property. For the 'NAME' (and 'SOURCE') columns, we
* need to know the maximum name length. However, the user likely did
* not specify 'name' as one of the properties to fetch, so we need to
* make sure we always include at least this property for
* print_get_headers() to work properly.
*/
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZFS_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
cb.cb_first = B_TRUE;
/* run for each object */
ret = zfs_for_each(argc, argv, flags, types, NULL,
&cb.cb_proplist, limit, get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
return (ret);
}
/*
* inherit [-rS] <property> <fs|vol> ...
*
* -r Recurse over all children
* -S Revert to received value, if any
*
* For each dataset specified on the command line, inherit the given property
* from its parent. Inheriting a property at the pool level will cause it to
* use the default value. The '-r' flag will recurse over all children, and is
* useful for setting a property on a hierarchy-wide basis, regardless of any
* local modifications for each dataset.
*/
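/*
 * Example (illustrative, hypothetical dataset names):
 *	zfs inherit -r compression pool/home
 * clears any local compression setting on pool/home and all of its children,
 * while 'zfs inherit -S mountpoint pool/home' reverts a locally overridden
 * mountpoint to the received value, if one exists.
 */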
typedef struct inherit_cbdata {
const char *cb_propname;
boolean_t cb_received;
} inherit_cbdata_t;
static int
inherit_recurse_cb(zfs_handle_t *zhp, void *data)
{
inherit_cbdata_t *cb = data;
zfs_prop_t prop = zfs_name_to_prop(cb->cb_propname);
/*
* If we're doing it recursively, then ignore properties that
* are not valid for this type of dataset.
*/
if (prop != ZPROP_INVAL &&
!zfs_prop_valid_for_type(prop, zfs_get_type(zhp), B_FALSE))
return (0);
return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
}
static int
inherit_cb(zfs_handle_t *zhp, void *data)
{
inherit_cbdata_t *cb = data;
return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
}
static int
zfs_do_inherit(int argc, char **argv)
{
int c;
zfs_prop_t prop;
inherit_cbdata_t cb = { 0 };
char *propname;
int ret = 0;
int flags = 0;
boolean_t received = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "rS")) != -1) {
switch (c) {
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'S':
received = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
propname = argv[0];
argc--;
argv++;
if ((prop = zfs_name_to_prop(propname)) != ZPROP_INVAL) {
if (zfs_prop_readonly(prop)) {
(void) fprintf(stderr, gettext(
"%s property is read-only\n"),
propname);
return (1);
}
if (!zfs_prop_inheritable(prop) && !received) {
(void) fprintf(stderr, gettext("'%s' property cannot "
"be inherited\n"), propname);
if (prop == ZFS_PROP_QUOTA ||
prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION) {
(void) fprintf(stderr, gettext("use 'zfs set "
"%s=none' to clear\n"), propname);
(void) fprintf(stderr, gettext("use 'zfs "
"inherit -S %s' to revert to received "
"value\n"), propname);
}
return (1);
}
if (received && (prop == ZFS_PROP_VOLSIZE ||
prop == ZFS_PROP_VERSION)) {
(void) fprintf(stderr, gettext("'%s' property cannot "
"be reverted to a received value\n"), propname);
return (1);
}
} else if (!zfs_prop_user(propname)) {
(void) fprintf(stderr, gettext("invalid property '%s'\n"),
propname);
usage(B_FALSE);
}
cb.cb_propname = propname;
cb.cb_received = received;
if (flags & ZFS_ITER_RECURSE) {
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
NULL, NULL, 0, inherit_recurse_cb, &cb);
} else {
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
NULL, NULL, 0, inherit_cb, &cb);
}
return (ret);
}
typedef struct upgrade_cbdata {
uint64_t cb_numupgraded;
uint64_t cb_numsamegraded;
uint64_t cb_numfailed;
uint64_t cb_version;
boolean_t cb_newer;
boolean_t cb_foundone;
char cb_lastfs[ZFS_MAX_DATASET_NAME_LEN];
} upgrade_cbdata_t;
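/*
 * Return B_TRUE when 'name' and the dataset behind 'zhp' live in the same
 * pool, i.e. their names are identical up to the first '/' or '@'.
 */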
static int
same_pool(zfs_handle_t *zhp, const char *name)
{
int len1 = strcspn(name, "/@");
const char *zhname = zfs_get_name(zhp);
int len2 = strcspn(zhname, "/@");
if (len1 != len2)
return (B_FALSE);
return (strncmp(name, zhname, len1) == 0);
}
static int
upgrade_list_callback(zfs_handle_t *zhp, void *data)
{
upgrade_cbdata_t *cb = data;
int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
/* list if it's old/new */
if ((!cb->cb_newer && version < ZPL_VERSION) ||
(cb->cb_newer && version > ZPL_VERSION)) {
char *str;
if (cb->cb_newer) {
str = gettext("The following filesystems are "
"formatted using a newer software version and\n"
"cannot be accessed on the current system.\n\n");
} else {
str = gettext("The following filesystems are "
"out of date, and can be upgraded. After being\n"
"upgraded, these filesystems (and any 'zfs send' "
"streams generated from\n"
"subsequent snapshots) will no longer be "
"accessible by older software versions.\n\n");
}
if (!cb->cb_foundone) {
(void) puts(str);
(void) printf(gettext("VER FILESYSTEM\n"));
(void) printf(gettext("--- ------------\n"));
cb->cb_foundone = B_TRUE;
}
(void) printf("%2u %s\n", version, zfs_get_name(zhp));
}
return (0);
}
static int
upgrade_set_callback(zfs_handle_t *zhp, void *data)
{
upgrade_cbdata_t *cb = data;
int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int needed_spa_version;
int spa_version;
if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
needed_spa_version = zfs_spa_version_map(cb->cb_version);
if (needed_spa_version < 0)
return (-1);
if (spa_version < needed_spa_version) {
/* can't upgrade */
(void) printf(gettext("%s: can not be "
"upgraded; the pool version needs to first "
"be upgraded\nto version %d\n\n"),
zfs_get_name(zhp), needed_spa_version);
cb->cb_numfailed++;
return (0);
}
/* upgrade */
if (version < cb->cb_version) {
char verstr[16];
(void) snprintf(verstr, sizeof (verstr),
"%llu", (u_longlong_t)cb->cb_version);
if (cb->cb_lastfs[0] && !same_pool(zhp, cb->cb_lastfs)) {
/*
* If they did "zfs upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (zfs_prop_set(zhp, "version", verstr) == 0)
cb->cb_numupgraded++;
else
cb->cb_numfailed++;
(void) strcpy(cb->cb_lastfs, zfs_get_name(zhp));
} else if (version > cb->cb_version) {
/* can't downgrade */
(void) printf(gettext("%s: can not be downgraded; "
"it is already at version %u\n"),
zfs_get_name(zhp), version);
cb->cb_numfailed++;
} else {
cb->cb_numsamegraded++;
}
return (0);
}
/*
* zfs upgrade
* zfs upgrade -v
* zfs upgrade [-r] [-V <version>] <-a | filesystem>
*/
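/*
 * Example (illustrative, hypothetical dataset names):
 *	zfs upgrade -r pool/home
 * upgrades pool/home and its descendants to the current ZPL version, and
 * 'zfs upgrade -V 4 pool/legacy' upgrades a single filesystem to a specific
 * version (the pool itself must already support that version).
 */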
static int
zfs_do_upgrade(int argc, char **argv)
{
boolean_t all = B_FALSE;
boolean_t showversions = B_FALSE;
int ret = 0;
upgrade_cbdata_t cb = { 0 };
int c;
int flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
/* check options */
while ((c = getopt(argc, argv, "rvV:a")) != -1) {
switch (c) {
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
if (zfs_prop_string_to_index(ZFS_PROP_VERSION,
optarg, &cb.cb_version) != 0) {
(void) fprintf(stderr,
gettext("invalid version %s\n"), optarg);
usage(B_FALSE);
}
break;
case 'a':
all = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if ((!all && !argc) && ((flags & ZFS_ITER_RECURSE) | cb.cb_version))
usage(B_FALSE);
if (showversions && (flags & ZFS_ITER_RECURSE || all ||
cb.cb_version || argc))
usage(B_FALSE);
if ((all || argc) && (showversions))
usage(B_FALSE);
if (all && argc)
usage(B_FALSE);
if (showversions) {
/* Show info on available versions. */
(void) printf(gettext("The following filesystem versions are "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS filesystem version\n"));
(void) printf(gettext(" 2 Enhanced directory entries\n"));
(void) printf(gettext(" 3 Case insensitive and filesystem "
"user identifier (FUID)\n"));
(void) printf(gettext(" 4 userquota, groupquota "
"properties\n"));
(void) printf(gettext(" 5 System attributes\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf("see the ZFS Administration Guide.\n\n");
ret = 0;
} else if (argc || all) {
/* Upgrade filesystems */
if (cb.cb_version == 0)
cb.cb_version = ZPL_VERSION;
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_set_callback, &cb);
(void) printf(gettext("%llu filesystems upgraded\n"),
(u_longlong_t)cb.cb_numupgraded);
if (cb.cb_numsamegraded) {
(void) printf(gettext("%llu filesystems already at "
"this version\n"),
(u_longlong_t)cb.cb_numsamegraded);
}
if (cb.cb_numfailed != 0)
ret = 1;
} else {
/* List old-version filesystems */
boolean_t found;
(void) printf(gettext("This system is currently running "
"ZFS filesystem version %llu.\n\n"), ZPL_VERSION);
flags |= ZFS_ITER_RECURSE;
ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_list_callback, &cb);
found = cb.cb_foundone;
cb.cb_foundone = B_FALSE;
cb.cb_newer = B_TRUE;
ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_list_callback, &cb);
if (!cb.cb_foundone && !found) {
(void) printf(gettext("All filesystems are "
"formatted with the current version.\n"));
}
}
return (ret);
}
/*
* zfs userspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] [-t type[,...]]
* filesystem | snapshot | path
* zfs groupspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] [-t type[,...]]
* filesystem | snapshot | path
* zfs projectspace [-Hp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] filesystem | snapshot | path
*
* -H Scripted mode; elide headers and separate columns by tabs.
* -i Translate SID to POSIX ID.
* -n Print numeric ID instead of user/group name.
* -o Control which fields to display.
* -p Use exact (parsable) numeric output.
* -s Specify sort columns, descending order.
* -S Specify sort columns, ascending order.
* -t Control which object types to display.
*
* Displays space consumed by, and quotas on, each user in the specified
* filesystem or snapshot.
*/
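/*
 * Example (illustrative, hypothetical dataset names):
 *	zfs userspace -o name,used,quota -s used pool/home
 * shows per-user space consumption and quotas for pool/home, sorted by the
 * USED column; 'zfs groupspace pool/home' reports the same information per
 * group, and 'zfs projectspace pool/home' per project ID.
 */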
/* us_field_types, us_field_hdr and us_field_names should be kept in sync */
enum us_field_types {
USFIELD_TYPE,
USFIELD_NAME,
USFIELD_USED,
USFIELD_QUOTA,
USFIELD_OBJUSED,
USFIELD_OBJQUOTA
};
static char *us_field_hdr[] = { "TYPE", "NAME", "USED", "QUOTA",
"OBJUSED", "OBJQUOTA" };
static char *us_field_names[] = { "type", "name", "used", "quota",
"objused", "objquota" };
#define USFIELD_LAST (sizeof (us_field_names) / sizeof (char *))
#define USTYPE_PSX_GRP (1 << 0)
#define USTYPE_PSX_USR (1 << 1)
#define USTYPE_SMB_GRP (1 << 2)
#define USTYPE_SMB_USR (1 << 3)
#define USTYPE_PROJ (1 << 4)
#define USTYPE_ALL \
(USTYPE_PSX_GRP | USTYPE_PSX_USR | USTYPE_SMB_GRP | USTYPE_SMB_USR | \
USTYPE_PROJ)
static int us_type_bits[] = {
USTYPE_PSX_GRP,
USTYPE_PSX_USR,
USTYPE_SMB_GRP,
USTYPE_SMB_USR,
USTYPE_ALL
};
static char *us_type_names[] = { "posixgroup", "posixuser", "smbgroup",
"smbuser", "all" };
typedef struct us_node {
nvlist_t *usn_nvl;
uu_avl_node_t usn_avlnode;
uu_list_node_t usn_listnode;
} us_node_t;
typedef struct us_cbdata {
nvlist_t **cb_nvlp;
uu_avl_pool_t *cb_avl_pool;
uu_avl_t *cb_avl;
boolean_t cb_numname;
boolean_t cb_nicenum;
boolean_t cb_sid2posix;
zfs_userquota_prop_t cb_prop;
zfs_sort_column_t *cb_sortcol;
size_t cb_width[USFIELD_LAST];
} us_cbdata_t;
static boolean_t us_populated = B_FALSE;
typedef struct {
zfs_sort_column_t *si_sortcol;
boolean_t si_numname;
} us_sort_info_t;
static int
us_field_index(char *field)
{
int i;
for (i = 0; i < USFIELD_LAST; i++) {
if (strcmp(field, us_field_names[i]) == 0)
return (i);
}
return (-1);
}
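/*
 * AVL/sort comparator for us_node_t entries.  Entries are compared column by
 * column according to the requested sort columns (type, name, used, quota);
 * the name comparison is numeric or string depending on whether numeric IDs
 * were requested, and the 'smbentity' flag breaks any remaining ties.
 */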
static int
us_compare(const void *larg, const void *rarg, void *unused)
{
const us_node_t *l = larg;
const us_node_t *r = rarg;
us_sort_info_t *si = (us_sort_info_t *)unused;
zfs_sort_column_t *sortcol = si->si_sortcol;
boolean_t numname = si->si_numname;
nvlist_t *lnvl = l->usn_nvl;
nvlist_t *rnvl = r->usn_nvl;
int rc = 0;
boolean_t lvb, rvb;
for (; sortcol != NULL; sortcol = sortcol->sc_next) {
char *lvstr = "";
char *rvstr = "";
uint32_t lv32 = 0;
uint32_t rv32 = 0;
uint64_t lv64 = 0;
uint64_t rv64 = 0;
zfs_prop_t prop = sortcol->sc_prop;
const char *propname = NULL;
boolean_t reverse = sortcol->sc_reverse;
switch (prop) {
case ZFS_PROP_TYPE:
propname = "type";
(void) nvlist_lookup_uint32(lnvl, propname, &lv32);
(void) nvlist_lookup_uint32(rnvl, propname, &rv32);
if (rv32 != lv32)
rc = (rv32 < lv32) ? 1 : -1;
break;
case ZFS_PROP_NAME:
propname = "name";
if (numname) {
compare_nums:
(void) nvlist_lookup_uint64(lnvl, propname,
&lv64);
(void) nvlist_lookup_uint64(rnvl, propname,
&rv64);
if (rv64 != lv64)
rc = (rv64 < lv64) ? 1 : -1;
} else {
if ((nvlist_lookup_string(lnvl, propname,
&lvstr) == ENOENT) ||
(nvlist_lookup_string(rnvl, propname,
&rvstr) == ENOENT)) {
goto compare_nums;
}
rc = strcmp(lvstr, rvstr);
}
break;
case ZFS_PROP_USED:
case ZFS_PROP_QUOTA:
if (!us_populated)
break;
if (prop == ZFS_PROP_USED)
propname = "used";
else
propname = "quota";
(void) nvlist_lookup_uint64(lnvl, propname, &lv64);
(void) nvlist_lookup_uint64(rnvl, propname, &rv64);
if (rv64 != lv64)
rc = (rv64 < lv64) ? 1 : -1;
break;
default:
break;
}
if (rc != 0) {
if (rc < 0)
return (reverse ? 1 : -1);
else
return (reverse ? -1 : 1);
}
}
/*
* If entries still seem to be the same, check if they are of the same
* type (smbentity is added only if we are doing SID to POSIX ID
* translation where we can have duplicate type/name combinations).
*/
if (nvlist_lookup_boolean_value(lnvl, "smbentity", &lvb) == 0 &&
nvlist_lookup_boolean_value(rnvl, "smbentity", &rvb) == 0 &&
lvb != rvb)
return (lvb < rvb ? -1 : 1);
return (0);
}
static boolean_t
zfs_prop_is_user(unsigned p)
{
return (p == ZFS_PROP_USERUSED || p == ZFS_PROP_USERQUOTA ||
p == ZFS_PROP_USEROBJUSED || p == ZFS_PROP_USEROBJQUOTA);
}
static boolean_t
zfs_prop_is_group(unsigned p)
{
return (p == ZFS_PROP_GROUPUSED || p == ZFS_PROP_GROUPQUOTA ||
p == ZFS_PROP_GROUPOBJUSED || p == ZFS_PROP_GROUPOBJQUOTA);
}
static boolean_t
zfs_prop_is_project(unsigned p)
{
return (p == ZFS_PROP_PROJECTUSED || p == ZFS_PROP_PROJECTQUOTA ||
p == ZFS_PROP_PROJECTOBJUSED || p == ZFS_PROP_PROJECTOBJQUOTA);
}
static inline const char *
us_type2str(unsigned field_type)
{
switch (field_type) {
case USTYPE_PSX_USR:
return ("POSIX User");
case USTYPE_PSX_GRP:
return ("POSIX Group");
case USTYPE_SMB_USR:
return ("SMB User");
case USTYPE_SMB_GRP:
return ("SMB Group");
case USTYPE_PROJ:
return ("Project");
default:
return ("Undefined");
}
}
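/*
 * Callback invoked by zfs_userspace() for every {domain, rid, space} record.
 * It resolves the entity name (SMB SID or POSIX user/group/project), keeps
 * the running column widths up to date, and merges the value into the
 * per-entity nvlist stored in the AVL tree.
 */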
static int
userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
{
us_cbdata_t *cb = (us_cbdata_t *)arg;
zfs_userquota_prop_t prop = cb->cb_prop;
char *name = NULL;
char *propname;
char sizebuf[32];
us_node_t *node;
uu_avl_pool_t *avl_pool = cb->cb_avl_pool;
uu_avl_t *avl = cb->cb_avl;
uu_avl_index_t idx;
nvlist_t *props;
us_node_t *n;
zfs_sort_column_t *sortcol = cb->cb_sortcol;
unsigned type = 0;
const char *typestr;
size_t namelen;
size_t typelen;
size_t sizelen;
int typeidx, nameidx, sizeidx;
us_sort_info_t sortinfo = { sortcol, cb->cb_numname };
boolean_t smbentity = B_FALSE;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
node = safe_malloc(sizeof (us_node_t));
uu_avl_node_init(node, &node->usn_avlnode, avl_pool);
node->usn_nvl = props;
if (domain != NULL && domain[0] != '\0') {
#ifdef HAVE_IDMAP
/* SMB */
char sid[MAXNAMELEN + 32];
uid_t id;
uint64_t classes;
int err;
directory_error_t e;
smbentity = B_TRUE;
(void) snprintf(sid, sizeof (sid), "%s-%u", domain, rid);
if (prop == ZFS_PROP_GROUPUSED || prop == ZFS_PROP_GROUPQUOTA) {
type = USTYPE_SMB_GRP;
err = sid_to_id(sid, B_FALSE, &id);
} else {
type = USTYPE_SMB_USR;
err = sid_to_id(sid, B_TRUE, &id);
}
if (err == 0) {
rid = id;
if (!cb->cb_sid2posix) {
e = directory_name_from_sid(NULL, sid, &name,
&classes);
if (e != NULL)
directory_error_free(e);
if (name == NULL)
name = sid;
}
}
#else
nvlist_free(props);
free(node);
return (-1);
#endif /* HAVE_IDMAP */
}
if (cb->cb_sid2posix || domain == NULL || domain[0] == '\0') {
/* POSIX or -i */
if (zfs_prop_is_group(prop)) {
type = USTYPE_PSX_GRP;
if (!cb->cb_numname) {
struct group *g;
if ((g = getgrgid(rid)) != NULL)
name = g->gr_name;
}
} else if (zfs_prop_is_user(prop)) {
type = USTYPE_PSX_USR;
if (!cb->cb_numname) {
struct passwd *p;
if ((p = getpwuid(rid)) != NULL)
name = p->pw_name;
}
} else {
type = USTYPE_PROJ;
}
}
/*
* Make sure that the type/name combination is unique when doing
* SID to POSIX ID translation (hence changing the type from SMB to
* POSIX).
*/
if (cb->cb_sid2posix &&
nvlist_add_boolean_value(props, "smbentity", smbentity) != 0)
nomem();
/* Calculate/update width of TYPE field */
typestr = us_type2str(type);
typelen = strlen(gettext(typestr));
typeidx = us_field_index("type");
if (typelen > cb->cb_width[typeidx])
cb->cb_width[typeidx] = typelen;
if (nvlist_add_uint32(props, "type", type) != 0)
nomem();
/* Calculate/update width of NAME field */
if ((cb->cb_numname && cb->cb_sid2posix) || name == NULL) {
if (nvlist_add_uint64(props, "name", rid) != 0)
nomem();
namelen = snprintf(NULL, 0, "%u", rid);
} else {
if (nvlist_add_string(props, "name", name) != 0)
nomem();
namelen = strlen(name);
}
nameidx = us_field_index("name");
if (nameidx >= 0 && namelen > cb->cb_width[nameidx])
cb->cb_width[nameidx] = namelen;
/*
* Check if this type/name combination is in the list and update it;
* otherwise add new node to the list.
*/
if ((n = uu_avl_find(avl, node, &sortinfo, &idx)) == NULL) {
uu_avl_insert(avl, node, idx);
} else {
nvlist_free(props);
free(node);
node = n;
props = node->usn_nvl;
}
/* Calculate/update width of USED/QUOTA fields */
if (cb->cb_nicenum) {
if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED ||
prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTUSED ||
prop == ZFS_PROP_PROJECTQUOTA) {
zfs_nicebytes(space, sizebuf, sizeof (sizebuf));
} else {
zfs_nicenum(space, sizebuf, sizeof (sizebuf));
}
} else {
(void) snprintf(sizebuf, sizeof (sizebuf), "%llu",
(u_longlong_t)space);
}
sizelen = strlen(sizebuf);
if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED ||
prop == ZFS_PROP_PROJECTUSED) {
propname = "used";
if (!nvlist_exists(props, "quota"))
(void) nvlist_add_uint64(props, "quota", 0);
} else if (prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTQUOTA) {
propname = "quota";
if (!nvlist_exists(props, "used"))
(void) nvlist_add_uint64(props, "used", 0);
} else if (prop == ZFS_PROP_USEROBJUSED ||
prop == ZFS_PROP_GROUPOBJUSED || prop == ZFS_PROP_PROJECTOBJUSED) {
propname = "objused";
if (!nvlist_exists(props, "objquota"))
(void) nvlist_add_uint64(props, "objquota", 0);
} else if (prop == ZFS_PROP_USEROBJQUOTA ||
prop == ZFS_PROP_GROUPOBJQUOTA ||
prop == ZFS_PROP_PROJECTOBJQUOTA) {
propname = "objquota";
if (!nvlist_exists(props, "objused"))
(void) nvlist_add_uint64(props, "objused", 0);
} else {
return (-1);
}
sizeidx = us_field_index(propname);
if (sizeidx >= 0 && sizelen > cb->cb_width[sizeidx])
cb->cb_width[sizeidx] = sizelen;
if (nvlist_add_uint64(props, propname, space) != 0)
nomem();
return (0);
}
static void
print_us_node(boolean_t scripted, boolean_t parsable, int *fields, int types,
size_t *width, us_node_t *node)
{
nvlist_t *nvl = node->usn_nvl;
char valstr[MAXNAMELEN];
boolean_t first = B_TRUE;
int cfield = 0;
int field;
uint32_t ustype;
/* Check type */
(void) nvlist_lookup_uint32(nvl, "type", &ustype);
if (!(ustype & types))
return;
while ((field = fields[cfield]) != USFIELD_LAST) {
nvpair_t *nvp = NULL;
data_type_t type;
uint32_t val32;
uint64_t val64;
char *strval = "-";
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
if (strcmp(nvpair_name(nvp),
us_field_names[field]) == 0)
break;
}
type = nvp == NULL ? DATA_TYPE_UNKNOWN : nvpair_type(nvp);
switch (type) {
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &val32);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &val64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &strval);
break;
case DATA_TYPE_UNKNOWN:
break;
default:
(void) fprintf(stderr, "invalid data type\n");
}
switch (field) {
case USFIELD_TYPE:
if (type == DATA_TYPE_UINT32)
strval = (char *)us_type2str(val32);
break;
case USFIELD_NAME:
if (type == DATA_TYPE_UINT64) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
}
break;
case USFIELD_USED:
case USFIELD_QUOTA:
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
} else if (field == USFIELD_QUOTA &&
val64 == 0) {
strval = "none";
} else {
zfs_nicebytes(val64, valstr,
sizeof (valstr));
strval = valstr;
}
}
break;
case USFIELD_OBJUSED:
case USFIELD_OBJQUOTA:
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
} else if (field == USFIELD_OBJQUOTA &&
val64 == 0) {
strval = "none";
} else {
zfs_nicenum(val64, valstr,
sizeof (valstr));
strval = valstr;
}
}
break;
}
if (!first) {
if (scripted)
(void) printf("\t");
else
(void) printf(" ");
}
if (scripted)
(void) printf("%s", strval);
else if (field == USFIELD_TYPE || field == USFIELD_NAME)
(void) printf("%-*s", (int)width[field], strval);
else
(void) printf("%*s", (int)width[field], strval);
first = B_FALSE;
cfield++;
}
(void) printf("\n");
}
static void
print_us(boolean_t scripted, boolean_t parsable, int *fields, int types,
size_t *width, boolean_t rmnode, uu_avl_t *avl)
{
us_node_t *node;
const char *col;
int cfield = 0;
int field;
if (!scripted) {
boolean_t first = B_TRUE;
while ((field = fields[cfield]) != USFIELD_LAST) {
col = gettext(us_field_hdr[field]);
if (field == USFIELD_TYPE || field == USFIELD_NAME) {
(void) printf(first ? "%-*s" : " %-*s",
(int)width[field], col);
} else {
(void) printf(first ? "%*s" : " %*s",
(int)width[field], col);
}
first = B_FALSE;
cfield++;
}
(void) printf("\n");
}
for (node = uu_avl_first(avl); node; node = uu_avl_next(avl, node)) {
print_us_node(scripted, parsable, fields, types, width, node);
if (rmnode)
nvlist_free(node->usn_nvl);
}
}
static int
zfs_do_userspace(int argc, char **argv)
{
zfs_handle_t *zhp;
zfs_userquota_prop_t p;
uu_avl_pool_t *avl_pool;
uu_avl_t *avl_tree;
uu_avl_walk_t *walk;
char *delim;
char deffields[] = "type,name,used,quota,objused,objquota";
char *ofield = NULL;
char *tfield = NULL;
int cfield = 0;
int fields[256];
int i;
boolean_t scripted = B_FALSE;
boolean_t prtnum = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t sid2posix = B_FALSE;
int ret = 0;
int c;
zfs_sort_column_t *sortcol = NULL;
int types = USTYPE_PSX_USR | USTYPE_SMB_USR;
us_cbdata_t cb;
us_node_t *node;
us_node_t *rmnode;
uu_list_pool_t *listpool;
uu_list_t *list;
uu_avl_index_t idx = 0;
uu_list_index_t idx2 = 0;
if (argc < 2)
usage(B_FALSE);
if (strcmp(argv[0], "groupspace") == 0) {
/* Toggle default group types */
types = USTYPE_PSX_GRP | USTYPE_SMB_GRP;
} else if (strcmp(argv[0], "projectspace") == 0) {
types = USTYPE_PROJ;
prtnum = B_TRUE;
}
while ((c = getopt(argc, argv, "nHpo:s:S:t:i")) != -1) {
switch (c) {
case 'n':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 'n'\n"));
usage(B_FALSE);
}
prtnum = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'o':
ofield = optarg;
break;
case 's':
case 'S':
if (zfs_add_sort_column(&sortcol, optarg,
c == 's' ? B_FALSE : B_TRUE) != 0) {
(void) fprintf(stderr,
gettext("invalid field '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 't':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 't'\n"));
usage(B_FALSE);
}
tfield = optarg;
break;
case 'i':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 'i'\n"));
usage(B_FALSE);
}
sid2posix = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing dataset name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* Use default output fields if not specified using -o */
if (ofield == NULL)
ofield = deffields;
do {
if ((delim = strchr(ofield, ',')) != NULL)
*delim = '\0';
if ((fields[cfield++] = us_field_index(ofield)) == -1) {
(void) fprintf(stderr, gettext("invalid type '%s' "
"for -o option\n"), ofield);
return (-1);
}
if (delim != NULL)
ofield = delim + 1;
} while (delim != NULL);
fields[cfield] = USFIELD_LAST;
/* Override output types (-t option) */
if (tfield != NULL) {
types = 0;
do {
boolean_t found = B_FALSE;
if ((delim = strchr(tfield, ',')) != NULL)
*delim = '\0';
for (i = 0; i < sizeof (us_type_bits) / sizeof (int);
i++) {
if (strcmp(tfield, us_type_names[i]) == 0) {
found = B_TRUE;
types |= us_type_bits[i];
break;
}
}
if (!found) {
(void) fprintf(stderr, gettext("invalid type "
"'%s' for -t option\n"), tfield);
return (-1);
}
if (delim != NULL)
tfield = delim + 1;
} while (delim != NULL);
}
if ((zhp = zfs_path_to_zhandle(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_SNAPSHOT)) == NULL)
return (1);
if (zfs_get_underlying_type(zhp) != ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("operation is only applicable "
"to filesystems and their snapshots\n"));
zfs_close(zhp);
return (1);
}
if ((avl_pool = uu_avl_pool_create("us_avl_pool", sizeof (us_node_t),
offsetof(us_node_t, usn_avlnode), us_compare, UU_DEFAULT)) == NULL)
nomem();
if ((avl_tree = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
nomem();
/* Always add default sorting columns */
(void) zfs_add_sort_column(&sortcol, "type", B_FALSE);
(void) zfs_add_sort_column(&sortcol, "name", B_FALSE);
cb.cb_sortcol = sortcol;
cb.cb_numname = prtnum;
cb.cb_nicenum = !parsable;
cb.cb_avl_pool = avl_pool;
cb.cb_avl = avl_tree;
cb.cb_sid2posix = sid2posix;
for (i = 0; i < USFIELD_LAST; i++)
cb.cb_width[i] = strlen(gettext(us_field_hdr[i]));
for (p = 0; p < ZFS_NUM_USERQUOTA_PROPS; p++) {
if ((zfs_prop_is_user(p) &&
!(types & (USTYPE_PSX_USR | USTYPE_SMB_USR))) ||
(zfs_prop_is_group(p) &&
!(types & (USTYPE_PSX_GRP | USTYPE_SMB_GRP))) ||
(zfs_prop_is_project(p) && types != USTYPE_PROJ))
continue;
cb.cb_prop = p;
if ((ret = zfs_userspace(zhp, p, userspace_cb, &cb)) != 0) {
zfs_close(zhp);
return (ret);
}
}
zfs_close(zhp);
/* Sort the list */
if ((node = uu_avl_first(avl_tree)) == NULL)
return (0);
us_populated = B_TRUE;
listpool = uu_list_pool_create("tmplist", sizeof (us_node_t),
offsetof(us_node_t, usn_listnode), NULL, UU_DEFAULT);
list = uu_list_create(listpool, NULL, UU_DEFAULT);
uu_list_node_init(node, &node->usn_listnode, listpool);
while (node != NULL) {
rmnode = node;
node = uu_avl_next(avl_tree, node);
uu_avl_remove(avl_tree, rmnode);
if (uu_list_find(list, rmnode, NULL, &idx2) == NULL)
uu_list_insert(list, rmnode, idx2);
}
for (node = uu_list_first(list); node != NULL;
node = uu_list_next(list, node)) {
us_sort_info_t sortinfo = { sortcol, cb.cb_numname };
if (uu_avl_find(avl_tree, node, &sortinfo, &idx) == NULL)
uu_avl_insert(avl_tree, node, idx);
}
uu_list_destroy(list);
uu_list_pool_destroy(listpool);
/* Print and free node nvlist memory */
print_us(scripted, parsable, fields, types, cb.cb_width, B_TRUE,
cb.cb_avl);
zfs_free_sort_columns(sortcol);
/* Clean up the AVL tree */
if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
nomem();
while ((node = uu_avl_walk_next(walk)) != NULL) {
uu_avl_remove(cb.cb_avl, node);
free(node);
}
uu_avl_walk_end(walk);
uu_avl_destroy(avl_tree);
uu_avl_pool_destroy(avl_pool);
return (ret);
}
/*
* list [-Hp][-r|-d max] [-o property[,...]] [-s property] ... [-S property]
* [-t type[,...]] [filesystem|volume|snapshot] ...
*
* -H Scripted mode; elide headers and separate columns by tabs
* -p Display values in parsable (literal) format.
* -r Recurse over all children
* -d Limit recursion by depth.
* -o Control which fields to display.
* -s Specify sort columns, descending order.
* -S Specify sort columns, ascending order.
* -t Control which object types to display.
*
* When given no arguments, list all filesystems in the system.
* Otherwise, list the specified datasets, optionally recursing down them if
* '-r' is specified.
*/
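/*
 * Example (illustrative, hypothetical dataset names):
 *	zfs list -r -t snapshot -o name,used -s creation pool/home
 * lists every snapshot under pool/home sorted by creation time, while
 * 'zfs list -Hp -o name,used,available pool' prints one tab-separated line
 * with exact byte counts for scripting.
 */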
typedef struct list_cbdata {
boolean_t cb_first;
boolean_t cb_literal;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZFS_MAXPROPLEN];
const char *header;
int i;
boolean_t first = B_TRUE;
boolean_t right_justify;
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
(void) printf(" ");
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_INVAL) {
header = zfs_prop_column_name(pl->pl_prop);
right_justify = zfs_prop_align_right(pl->pl_prop);
} else {
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) printf("%s", header);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, header);
else
(void) printf("%-*s", (int)pl->pl_width, header);
}
(void) printf("\n");
}
/*
* Given a dataset and a list of fields, print out all the properties according
* to the described layout.
*/
static void
print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZFS_MAXPROPLEN];
nvlist_t *userprops = zfs_get_user_props(zhp);
nvlist_t *propval;
char *propstr;
boolean_t right_justify;
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
if (cb->cb_scripted)
(void) printf("\t");
else
(void) printf(" ");
} else {
first = B_FALSE;
}
if (pl->pl_prop == ZFS_PROP_NAME) {
(void) strlcpy(property, zfs_get_name(zhp),
sizeof (property));
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (pl->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, pl->pl_prop, property,
sizeof (property), NULL, NULL, 0,
cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = B_TRUE;
} else if (zfs_prop_written(pl->pl_user_prop)) {
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = B_TRUE;
} else {
if (nvlist_lookup_nvlist(userprops,
pl->pl_user_prop, &propval) != 0)
propstr = "-";
else
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &propstr) == 0);
right_justify = B_FALSE;
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) printf("%s", propstr);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, propstr);
else
(void) printf("%-*s", (int)pl->pl_width, propstr);
}
(void) printf("\n");
}
/*
* Generic callback function to list a dataset or snapshot.
*/
static int
list_callback(zfs_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
if (cbp->cb_first) {
if (!cbp->cb_scripted)
print_header(cbp);
cbp->cb_first = B_FALSE;
}
print_dataset(zhp, cbp);
return (0);
}
static int
zfs_do_list(int argc, char **argv)
{
int c;
static char default_fields[] =
"name,used,available,referenced,mountpoint";
int types = ZFS_TYPE_DATASET;
boolean_t types_specified = B_FALSE;
char *fields = NULL;
list_cbdata_t cb = { 0 };
char *value;
int limit = 0;
int ret = 0;
zfs_sort_column_t *sortcol = NULL;
int flags = ZFS_ITER_PROP_LISTSNAPS | ZFS_ITER_ARGS_CAN_BE_PATHS;
/* check options */
while ((c = getopt(argc, argv, "HS:d:o:prs:t:")) != -1) {
switch (c) {
case 'o':
fields = optarg;
break;
case 'p':
cb.cb_literal = B_TRUE;
flags |= ZFS_ITER_LITERAL_PROPS;
break;
case 'd':
limit = parse_depth(optarg, &flags);
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 's':
if (zfs_add_sort_column(&sortcol, optarg,
B_FALSE) != 0) {
(void) fprintf(stderr,
gettext("invalid property '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 'S':
if (zfs_add_sort_column(&sortcol, optarg,
B_TRUE) != 0) {
(void) fprintf(stderr,
gettext("invalid property '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 't':
types = 0;
types_specified = B_TRUE;
flags &= ~ZFS_ITER_PROP_LISTSNAPS;
while (*optarg != '\0') {
static char *type_subopts[] = { "filesystem",
"volume", "snapshot", "snap", "bookmark",
"all", NULL };
switch (getsubopt(&optarg, type_subopts,
&value)) {
case 0:
types |= ZFS_TYPE_FILESYSTEM;
break;
case 1:
types |= ZFS_TYPE_VOLUME;
break;
case 2:
case 3:
types |= ZFS_TYPE_SNAPSHOT;
break;
case 4:
types |= ZFS_TYPE_BOOKMARK;
break;
case 5:
types = ZFS_TYPE_DATASET |
ZFS_TYPE_BOOKMARK;
break;
default:
(void) fprintf(stderr,
gettext("invalid type '%s'\n"),
value);
usage(B_FALSE);
}
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (fields == NULL)
fields = default_fields;
/*
* If we are only going to list snapshot names and sort by name,
 * then we can use the faster version.
*/
if (strcmp(fields, "name") == 0 && zfs_sort_only_by_name(sortcol))
flags |= ZFS_ITER_SIMPLE;
/*
* If "-o space" and no types were specified, don't display snapshots.
*/
if (strcmp(fields, "space") == 0 && types_specified == B_FALSE)
types &= ~ZFS_TYPE_SNAPSHOT;
/*
* Handle users who want to list all snapshots or bookmarks
* of the current dataset (ex. 'zfs list -t snapshot <dataset>').
*/
if ((types == ZFS_TYPE_SNAPSHOT || types == ZFS_TYPE_BOOKMARK) &&
argc > 0 && (flags & ZFS_ITER_RECURSE) == 0 && limit == 0) {
flags |= (ZFS_ITER_DEPTH_LIMIT | ZFS_ITER_RECURSE);
limit = 1;
}
/*
* If the user specifies '-o all', the zprop_get_list() doesn't
* normally include the name of the dataset. For 'zfs list', we always
* want this property to be first.
*/
if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
!= 0)
usage(B_FALSE);
cb.cb_first = B_TRUE;
ret = zfs_for_each(argc, argv, flags, types, sortcol, &cb.cb_proplist,
limit, list_callback, &cb);
zprop_free_list(cb.cb_proplist);
zfs_free_sort_columns(sortcol);
if (ret == 0 && cb.cb_first && !cb.cb_scripted)
(void) fprintf(stderr, gettext("no datasets available\n"));
return (ret);
}
/*
* zfs rename [-fu] <fs | snap | vol> <fs | snap | vol>
* zfs rename [-f] -p <fs | vol> <fs | vol>
* zfs rename [-u] -r <snap> <snap>
*
* Renames the given dataset to another of the same type.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
* The '-u' flag prevents file systems from being remounted during rename.
*/
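/*
 * Example (illustrative, hypothetical dataset names):
 *	zfs rename -p pool/old/fs pool/new/home/fs
 * creates the missing pool/new/home ancestors and moves the filesystem, and
 * 'zfs rename -r pool/fs@monday pool/fs@tuesday' renames that snapshot for
 * pool/fs and every descendant that has it.
 */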
/* ARGSUSED */
static int
zfs_do_rename(int argc, char **argv)
{
zfs_handle_t *zhp;
renameflags_t flags = { 0 };
int c;
int ret = 0;
int types;
boolean_t parents = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "pruf")) != -1) {
switch (c) {
case 'p':
parents = B_TRUE;
break;
case 'r':
flags.recursive = B_TRUE;
break;
case 'u':
flags.nounmount = B_TRUE;
break;
case 'f':
flags.forceunmount = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (flags.recursive && parents) {
(void) fprintf(stderr, gettext("-p and -r options are mutually "
"exclusive\n"));
usage(B_FALSE);
}
if (flags.nounmount && parents) {
(void) fprintf(stderr, gettext("-u and -p options are mutually "
"exclusive\n"));
usage(B_FALSE);
}
if (flags.recursive && strchr(argv[0], '@') == 0) {
(void) fprintf(stderr, gettext("source dataset for recursive "
"rename must be a snapshot\n"));
usage(B_FALSE);
}
if (flags.nounmount)
types = ZFS_TYPE_FILESYSTEM;
else if (parents)
types = ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
else
types = ZFS_TYPE_DATASET;
if ((zhp = zfs_open(g_zfs, argv[0], types)) == NULL)
return (1);
/* If we were asked and the name looks good, try to create ancestors. */
if (parents && zfs_name_valid(argv[1], zfs_get_type(zhp)) &&
zfs_create_ancestors(g_zfs, argv[1]) != 0) {
zfs_close(zhp);
return (1);
}
ret = (zfs_rename(zhp, argv[1], flags) != 0);
zfs_close(zhp);
return (ret);
}
/*
* zfs promote <fs>
*
* Promotes the given clone fs to be the parent
*/
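/*
 * Example (illustrative): after 'zfs clone pool/fs@snap pool/clone', running
 * 'zfs promote pool/clone' reverses the clone/origin relationship so that
 * pool/fs becomes a clone of pool/clone and the shared snapshots migrate to
 * the promoted filesystem.
 */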
/* ARGSUSED */
static int
zfs_do_promote(int argc, char **argv)
{
zfs_handle_t *zhp;
int ret = 0;
/* check options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
/* check number of arguments */
if (argc < 2) {
(void) fprintf(stderr, gettext("missing clone filesystem"
" argument\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (1);
ret = (zfs_promote(zhp) != 0);
zfs_close(zhp);
return (ret);
}
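/*
 * zfs redact <snapshot> <bookmark> <redaction_snapshot> ...
 *
 * Creates a redaction bookmark named <bookmark> on <snapshot>, recording the
 * blocks modified by the listed redaction snapshots.  The bookmark can then
 * be used with 'zfs send --redact' to omit those blocks from a send stream.
 */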
static int
zfs_do_redact(int argc, char **argv)
{
char *snap = NULL;
char *bookname = NULL;
char **rsnaps = NULL;
int numrsnaps = 0;
argv++;
argc--;
if (argc < 3) {
(void) fprintf(stderr, gettext("too few arguments\n"));
usage(B_FALSE);
}
snap = argv[0];
bookname = argv[1];
rsnaps = argv + 2;
numrsnaps = argc - 2;
nvlist_t *rsnapnv = fnvlist_alloc();
for (int i = 0; i < numrsnaps; i++) {
fnvlist_add_boolean(rsnapnv, rsnaps[i]);
}
int err = lzc_redact(snap, bookname, rsnapnv);
fnvlist_free(rsnapnv);
switch (err) {
case 0:
break;
case ENOENT:
(void) fprintf(stderr,
gettext("provided snapshot %s does not exist\n"), snap);
break;
case EEXIST:
(void) fprintf(stderr, gettext("specified redaction bookmark "
"(%s) provided already exists\n"), bookname);
break;
case ENAMETOOLONG:
(void) fprintf(stderr, gettext("provided bookmark name cannot "
"be used, final name would be too long\n"));
break;
case E2BIG:
(void) fprintf(stderr, gettext("too many redaction snapshots "
"specified\n"));
break;
case EINVAL:
if (strchr(bookname, '#') != NULL)
(void) fprintf(stderr, gettext(
"redaction bookmark name must not contain '#'\n"));
else
(void) fprintf(stderr, gettext(
"redaction snapshot must be descendent of "
"snapshot being redacted\n"));
break;
case EALREADY:
(void) fprintf(stderr, gettext("attempted to redact redacted "
"dataset or with respect to redacted dataset\n"));
break;
case ENOTSUP:
(void) fprintf(stderr, gettext("redaction bookmarks feature "
"not enabled\n"));
break;
case EXDEV:
(void) fprintf(stderr, gettext("potentially invalid redaction "
"snapshot; full dataset names required\n"));
break;
default:
(void) fprintf(stderr, gettext("internal error: %s\n"),
strerror(errno));
}
return (err);
}
/*
* zfs rollback [-rRf] <snapshot>
*
* -r Delete any intervening snapshots before doing rollback
* -R Delete any snapshots and their clones
* -f ignored for backwards compatibility
*
* Given a filesystem, rollback to a specific snapshot, discarding any changes
* since then and making it the active dataset. If more recent snapshots exist,
* the command will complain unless the '-r' flag is given.
*/
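/*
 * Example (illustrative, hypothetical names):
 *	zfs rollback -r pool/home@monday
 * discards everything written to pool/home since the 'monday' snapshot,
 * destroying any newer snapshots and bookmarks in the process; without '-r'
 * the command refuses to run if such newer snapshots exist.
 */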
typedef struct rollback_cbdata {
uint64_t cb_create;
uint8_t cb_younger_ds_printed;
boolean_t cb_first;
int cb_doclones;
char *cb_target;
int cb_error;
boolean_t cb_recurse;
} rollback_cbdata_t;
static int
rollback_check_dependent(zfs_handle_t *zhp, void *data)
{
rollback_cbdata_t *cbp = data;
if (cbp->cb_first && cbp->cb_recurse) {
(void) fprintf(stderr, gettext("cannot rollback to "
"'%s': clones of previous snapshots exist\n"),
cbp->cb_target);
(void) fprintf(stderr, gettext("use '-R' to "
"force deletion of the following clones and "
"dependents:\n"));
cbp->cb_first = 0;
cbp->cb_error = 1;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
zfs_close(zhp);
return (0);
}
/*
 * Report any snapshots/bookmarks more recent than the one specified.
 * Used when '-r' is not specified; with '-r', the clones and other
 * dependents of those newer snapshots are reported through
 * rollback_check_dependent() instead.
 */
static int
rollback_check(zfs_handle_t *zhp, void *data)
{
rollback_cbdata_t *cbp = data;
/*
* Max number of younger snapshots and/or bookmarks to display before
* we stop the iteration.
*/
const uint8_t max_younger = 32;
if (cbp->cb_doclones) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
if (cbp->cb_first && !cbp->cb_recurse) {
(void) fprintf(stderr, gettext("cannot "
"rollback to '%s': more recent snapshots "
"or bookmarks exist\n"),
cbp->cb_target);
(void) fprintf(stderr, gettext("use '-r' to "
"force deletion of the following "
"snapshots and bookmarks:\n"));
cbp->cb_first = 0;
cbp->cb_error = 1;
}
if (cbp->cb_recurse) {
if (zfs_iter_dependents(zhp, B_TRUE,
rollback_check_dependent, cbp) != 0) {
zfs_close(zhp);
return (-1);
}
} else {
(void) fprintf(stderr, "%s\n",
zfs_get_name(zhp));
cbp->cb_younger_ds_printed++;
}
}
zfs_close(zhp);
if (cbp->cb_younger_ds_printed == max_younger) {
/*
* This non-recursive rollback is going to fail due to the
* presence of snapshots and/or bookmarks that are younger than
* the rollback target.
 * We printed some of the offending objects; now we stop the
 * zfs_iter_snapshot/bookmark iteration so we can fail fast and
 * avoid iterating over the rest of the younger objects.
*/
(void) fprintf(stderr, gettext("Output limited to %d "
"snapshots/bookmarks\n"), max_younger);
return (-1);
}
return (0);
}
static int
zfs_do_rollback(int argc, char **argv)
{
int ret = 0;
int c;
boolean_t force = B_FALSE;
rollback_cbdata_t cb = { 0 };
zfs_handle_t *zhp, *snap;
char parentname[ZFS_MAX_DATASET_NAME_LEN];
char *delim;
uint64_t min_txg = 0;
/* check options */
while ((c = getopt(argc, argv, "rRf")) != -1) {
switch (c) {
case 'r':
cb.cb_recurse = 1;
break;
case 'R':
cb.cb_recurse = 1;
cb.cb_doclones = 1;
break;
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* open the snapshot */
if ((snap = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL)
return (1);
/* open the parent dataset */
(void) strlcpy(parentname, argv[0], sizeof (parentname));
verify((delim = strrchr(parentname, '@')) != NULL);
*delim = '\0';
if ((zhp = zfs_open(g_zfs, parentname, ZFS_TYPE_DATASET)) == NULL) {
zfs_close(snap);
return (1);
}
/*
* Check for more recent snapshots and/or clones based on the presence
* of '-r' and '-R'.
*/
cb.cb_target = argv[0];
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
cb.cb_first = B_TRUE;
cb.cb_error = 0;
if (cb.cb_create > 0)
min_txg = cb.cb_create;
if ((ret = zfs_iter_snapshots(zhp, B_FALSE, rollback_check, &cb,
min_txg, 0)) != 0)
goto out;
if ((ret = zfs_iter_bookmarks(zhp, rollback_check, &cb)) != 0)
goto out;
if ((ret = cb.cb_error) != 0)
goto out;
/*
* Rollback parent to the given snapshot.
*/
ret = zfs_rollback(zhp, snap, force);
out:
zfs_close(snap);
zfs_close(zhp);
if (ret == 0)
return (0);
else
return (1);
}
/*
* zfs set property=value ... { fs | snap | vol } ...
*
* Sets the given properties for all datasets specified on the command line.
*/
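/*
 * Example (illustrative, hypothetical names):
 *	zfs set compression=lz4 atime=off pool/home pool/builds
 * applies both property settings to both datasets in a single invocation;
 * property=value arguments must precede the dataset names.
 */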
static int
set_callback(zfs_handle_t *zhp, void *data)
{
nvlist_t *props = data;
if (zfs_prop_set_list(zhp, props) != 0) {
switch (libzfs_errno(g_zfs)) {
case EZFS_MOUNTFAILED:
(void) fprintf(stderr, gettext("property may be set "
"but unable to remount filesystem\n"));
break;
case EZFS_SHARENFSFAILED:
(void) fprintf(stderr, gettext("property may be set "
"but unable to reshare filesystem\n"));
break;
}
return (1);
}
return (0);
}
static int
zfs_do_set(int argc, char **argv)
{
nvlist_t *props = NULL;
int ds_start = -1; /* argv idx of first dataset arg */
int ret = 0;
int i;
/* check for options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
/* check number of arguments */
if (argc < 2) {
(void) fprintf(stderr, gettext("missing arguments\n"));
usage(B_FALSE);
}
if (argc < 3) {
if (strchr(argv[1], '=') == NULL) {
(void) fprintf(stderr, gettext("missing property=value "
"argument(s)\n"));
} else {
(void) fprintf(stderr, gettext("missing dataset "
"name(s)\n"));
}
usage(B_FALSE);
}
/* validate argument order: prop=val args followed by dataset args */
for (i = 1; i < argc; i++) {
if (strchr(argv[i], '=') != NULL) {
if (ds_start > 0) {
/* out-of-order prop=val argument */
(void) fprintf(stderr, gettext("invalid "
"argument order\n"));
usage(B_FALSE);
}
} else if (ds_start < 0) {
ds_start = i;
}
}
if (ds_start < 0) {
(void) fprintf(stderr, gettext("missing dataset name(s)\n"));
usage(B_FALSE);
}
/* Populate a list of property settings */
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
for (i = 1; i < ds_start; i++) {
if (!parseprop(props, argv[i])) {
ret = -1;
goto error;
}
}
ret = zfs_for_each(argc - ds_start, argv + ds_start, 0,
ZFS_TYPE_DATASET, NULL, NULL, 0, set_callback, props);
error:
nvlist_free(props);
return (ret);
}
typedef struct snap_cbdata {
nvlist_t *sd_nvl;
boolean_t sd_recursive;
const char *sd_snapname;
} snap_cbdata_t;
static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
snap_cbdata_t *sd = arg;
char *name;
int rv = 0;
int error;
if (sd->sd_recursive &&
zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) != 0) {
zfs_close(zhp);
return (0);
}
error = asprintf(&name, "%s@%s", zfs_get_name(zhp), sd->sd_snapname);
if (error == -1)
nomem();
fnvlist_add_boolean(sd->sd_nvl, name);
free(name);
if (sd->sd_recursive)
rv = zfs_iter_filesystems(zhp, zfs_snapshot_cb, sd);
zfs_close(zhp);
return (rv);
}
/*
* zfs snapshot [-r] [-o prop=value] ... <fs@snap>
*
* Creates a snapshot with the given name. While functionally equivalent to
* 'zfs create', it is a separate command to differentiate intent.
*/
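/*
 * Example (illustrative, hypothetical names):
 *	zfs snapshot -r -o com.example:reason=backup pool/home@2021-06-01
 * atomically snapshots pool/home and all of its descendants, setting the
 * user property on each new snapshot.
 */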
static int
zfs_do_snapshot(int argc, char **argv)
{
int ret = 0;
int c;
nvlist_t *props;
snap_cbdata_t sd = { 0 };
boolean_t multiple_snaps = B_FALSE;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (nvlist_alloc(&sd.sd_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, "ro:")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(sd.sd_nvl);
nvlist_free(props);
return (1);
}
break;
case 'r':
sd.sd_recursive = B_TRUE;
multiple_snaps = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
goto usage;
}
if (argc > 1)
multiple_snaps = B_TRUE;
for (; argc > 0; argc--, argv++) {
char *atp;
zfs_handle_t *zhp;
atp = strchr(argv[0], '@');
if (atp == NULL)
goto usage;
*atp = '\0';
sd.sd_snapname = atp + 1;
zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
goto usage;
if (zfs_snapshot_cb(zhp, &sd) != 0)
goto usage;
}
ret = zfs_snapshot_nvl(g_zfs, sd.sd_nvl, props);
nvlist_free(sd.sd_nvl);
nvlist_free(props);
if (ret != 0 && multiple_snaps)
(void) fprintf(stderr, gettext("no snapshots were created\n"));
return (ret != 0);
usage:
nvlist_free(sd.sd_nvl);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
/*
* Send a backup stream to stdout.
*/
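/*
 * Example (illustrative, hypothetical names):
 *	zfs send -i pool/fs@monday pool/fs@tuesday | ssh host zfs recv tank/fs
 * sends only the changes between the two snapshots, while
 *	zfs send -Rw pool/fs@tuesday > /backup/fs.zstream
 * writes a raw, full replication stream of pool/fs and its descendants to a
 * file (the stream may not be written directly to a terminal).
 */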
static int
zfs_do_send(int argc, char **argv)
{
char *fromname = NULL;
char *toname = NULL;
char *resume_token = NULL;
char *cp;
zfs_handle_t *zhp;
sendflags_t flags = { 0 };
int c, err;
nvlist_t *dbgnv = NULL;
char *redactbook = NULL;
struct option long_options[] = {
{"replicate", no_argument, NULL, 'R'},
{"skip-missing", no_argument, NULL, 's'},
{"redact", required_argument, NULL, 'd'},
{"props", no_argument, NULL, 'p'},
{"parsable", no_argument, NULL, 'P'},
{"dedup", no_argument, NULL, 'D'},
{"verbose", no_argument, NULL, 'v'},
{"dryrun", no_argument, NULL, 'n'},
{"large-block", no_argument, NULL, 'L'},
{"embed", no_argument, NULL, 'e'},
{"resume", required_argument, NULL, 't'},
{"compressed", no_argument, NULL, 'c'},
{"raw", no_argument, NULL, 'w'},
{"backup", no_argument, NULL, 'b'},
{"holds", no_argument, NULL, 'h'},
{"saved", no_argument, NULL, 'S'},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":i:I:RsDpvnPLeht:cwbd:S",
long_options, NULL)) != -1) {
switch (c) {
case 'i':
if (fromname)
usage(B_FALSE);
fromname = optarg;
break;
case 'I':
if (fromname)
usage(B_FALSE);
fromname = optarg;
flags.doall = B_TRUE;
break;
case 'R':
flags.replicate = B_TRUE;
break;
case 's':
flags.skipmissing = B_TRUE;
break;
case 'd':
redactbook = optarg;
break;
case 'p':
flags.props = B_TRUE;
break;
case 'b':
flags.backup = B_TRUE;
break;
case 'h':
flags.holds = B_TRUE;
break;
case 'P':
flags.parsable = B_TRUE;
break;
case 'v':
flags.verbosity++;
flags.progress = B_TRUE;
break;
case 'D':
(void) fprintf(stderr,
gettext("WARNING: deduplicated send is no "
"longer supported. A regular,\n"
"non-deduplicated stream will be generated.\n\n"));
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'L':
flags.largeblock = B_TRUE;
break;
case 'e':
flags.embed_data = B_TRUE;
break;
case 't':
resume_token = optarg;
break;
case 'c':
flags.compress = B_TRUE;
break;
case 'w':
flags.raw = B_TRUE;
flags.compress = B_TRUE;
flags.embed_data = B_TRUE;
flags.largeblock = B_TRUE;
break;
case 'S':
flags.saved = B_TRUE;
break;
case ':':
/*
* If a parameter was not passed, optopt contains the
* value that would normally lead us into the
* appropriate case statement. If it's greater than
* UINT8_MAX, this must be a longopt and we should look at
* argv to get the string. Otherwise it's just the short
* option character, so we should use it directly.
*/
if (optopt <= UINT8_MAX) {
(void) fprintf(stderr,
gettext("missing argument for '%c' "
"option\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("missing argument for '%s' "
"option\n"), argv[optind - 1]);
}
usage(B_FALSE);
break;
case '?':
- /*FALLTHROUGH*/
default:
/*
* If an invalid flag was passed, optopt contains the
* character if it was a short flag, or 0 if it was a
* longopt.
*/
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
if (flags.parsable && flags.verbosity == 0)
flags.verbosity = 1;
argc -= optind;
argv += optind;
if (resume_token != NULL) {
if (fromname != NULL || flags.replicate || flags.props ||
flags.backup || flags.holds ||
flags.saved || redactbook != NULL) {
(void) fprintf(stderr,
gettext("invalid flags combined with -t\n"));
usage(B_FALSE);
}
if (argc > 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc < 1) {
(void) fprintf(stderr,
gettext("missing snapshot argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
if (flags.saved) {
if (fromname != NULL || flags.replicate || flags.props ||
flags.doall || flags.backup ||
flags.holds || flags.largeblock || flags.embed_data ||
flags.compress || flags.raw || redactbook != NULL) {
(void) fprintf(stderr, gettext("incompatible flags "
"combined with saved send flag\n"));
usage(B_FALSE);
}
if (strchr(argv[0], '@') != NULL) {
(void) fprintf(stderr, gettext("saved send must "
"specify the dataset with partially-received "
"state\n"));
usage(B_FALSE);
}
}
if (flags.raw && redactbook != NULL) {
(void) fprintf(stderr,
gettext("Error: raw sends may not be redacted.\n"));
return (1);
}
if (!flags.dryrun && isatty(STDOUT_FILENO)) {
(void) fprintf(stderr,
gettext("Error: Stream can not be written to a terminal.\n"
"You must redirect standard output.\n"));
return (1);
}
if (flags.saved) {
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET);
if (zhp == NULL)
return (1);
err = zfs_send_saved(zhp, &flags, STDOUT_FILENO,
resume_token);
if (err != 0)
note_dev_error(errno, STDOUT_FILENO);
zfs_close(zhp);
return (err != 0);
} else if (resume_token != NULL) {
err = zfs_send_resume(g_zfs, &flags, STDOUT_FILENO,
resume_token);
if (err != 0)
note_dev_error(errno, STDOUT_FILENO);
return (err);
}
if (flags.skipmissing && !flags.replicate) {
(void) fprintf(stderr,
gettext("skip-missing flag can only be used in "
"conjunction with replicate\n"));
usage(B_FALSE);
}
/*
* For everything except -R and -I, use the new, cleaner code path.
*/
if (!(flags.replicate || flags.doall)) {
char frombuf[ZFS_MAX_DATASET_NAME_LEN];
if (fromname != NULL && (strchr(fromname, '#') == NULL &&
strchr(fromname, '@') == NULL)) {
/*
* Neither a bookmark nor a snapshot was specified. Print a
* warning, and assume a snapshot.
*/
(void) fprintf(stderr, "Warning: incremental source "
"didn't specify type, assuming snapshot. Use '@' "
"or '#' prefix to avoid ambiguity.\n");
(void) snprintf(frombuf, sizeof (frombuf), "@%s",
fromname);
fromname = frombuf;
}
if (fromname != NULL &&
(fromname[0] == '#' || fromname[0] == '@')) {
/*
* Incremental source name begins with # or @.
* Default to same fs as target.
*/
char tmpbuf[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(tmpbuf, fromname, sizeof (tmpbuf));
(void) strlcpy(frombuf, argv[0], sizeof (frombuf));
cp = strchr(frombuf, '@');
if (cp != NULL)
*cp = '\0';
(void) strlcat(frombuf, tmpbuf, sizeof (frombuf));
fromname = frombuf;
}
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET);
if (zhp == NULL)
return (1);
err = zfs_send_one(zhp, fromname, STDOUT_FILENO, &flags,
redactbook);
zfs_close(zhp);
if (err != 0)
note_dev_error(errno, STDOUT_FILENO);
return (err != 0);
}
if (fromname != NULL && strchr(fromname, '#')) {
(void) fprintf(stderr,
gettext("Error: multiple snapshots cannot be "
"sent from a bookmark.\n"));
return (1);
}
if (redactbook != NULL) {
(void) fprintf(stderr, gettext("Error: multiple snapshots "
"cannot be sent redacted.\n"));
return (1);
}
if ((cp = strchr(argv[0], '@')) == NULL) {
(void) fprintf(stderr, gettext("Error: "
"Unsupported flag with filesystem or bookmark.\n"));
return (1);
}
*cp = '\0';
toname = cp + 1;
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (1);
/*
* If they specified the full path to the snapshot, chop off
* everything except the short name of the snapshot, but special
* case if they specify the origin.
*/
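/*
* A sketch of the effect: for
*   zfs send -i tank/fs@snap1 tank/fs@snap2
* fromname is reduced to the short name "snap1". If tank/fs is a clone
* whose origin property is tank/base@snap1, then
*   zfs send -i tank/base@snap1 tank/fs@snap2
* instead clears fromname and sets flags.fromorigin.
*/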
if (fromname && (cp = strchr(fromname, '@')) != NULL) {
char origin[ZFS_MAX_DATASET_NAME_LEN];
zprop_source_t src;
(void) zfs_prop_get(zhp, ZFS_PROP_ORIGIN,
origin, sizeof (origin), &src, NULL, 0, B_FALSE);
if (strcmp(origin, fromname) == 0) {
fromname = NULL;
flags.fromorigin = B_TRUE;
} else {
*cp = '\0';
if (cp != fromname && strcmp(argv[0], fromname)) {
(void) fprintf(stderr,
gettext("incremental source must be "
"in same filesystem\n"));
usage(B_FALSE);
}
fromname = cp + 1;
if (strchr(fromname, '@') || strchr(fromname, '/')) {
(void) fprintf(stderr,
gettext("invalid incremental source\n"));
usage(B_FALSE);
}
}
}
if (flags.replicate && fromname == NULL)
flags.doall = B_TRUE;
err = zfs_send(zhp, fromname, toname, &flags, STDOUT_FILENO, NULL, 0,
flags.verbosity >= 3 ? &dbgnv : NULL);
if (flags.verbosity >= 3 && dbgnv != NULL) {
/*
* dump_nvlist prints to stdout, but that's been
* redirected to a file. Make it print to stderr
* instead.
*/
(void) dup2(STDERR_FILENO, STDOUT_FILENO);
dump_nvlist(dbgnv, 0);
nvlist_free(dbgnv);
}
zfs_close(zhp);
note_dev_error(errno, STDOUT_FILENO);
return (err != 0);
}
/*
* Restore a backup stream from stdin.
*/
static int
zfs_do_receive(int argc, char **argv)
{
int c, err = 0;
recvflags_t flags = { 0 };
boolean_t abort_resumable = B_FALSE;
nvlist_t *props;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":o:x:dehMnuvFsA")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'x':
if (!parsepropname(props, optarg)) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'd':
if (flags.istail) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -d and -e are mutually "
"exclusive\n"));
usage(B_FALSE);
}
flags.isprefix = B_TRUE;
break;
case 'e':
if (flags.isprefix) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -d and -e are mutually "
"exclusive\n"));
usage(B_FALSE);
}
flags.istail = B_TRUE;
break;
case 'h':
flags.skipholds = B_TRUE;
break;
case 'M':
flags.forceunmount = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'u':
flags.nomount = B_TRUE;
break;
case 'v':
flags.verbose = B_TRUE;
break;
case 's':
flags.resumable = B_TRUE;
break;
case 'F':
flags.force = B_TRUE;
break;
case 'A':
abort_resumable = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* zfs recv -e (use "tail" name) implies -d (remove dataset "head") */
if (flags.istail)
flags.isprefix = B_TRUE;
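/*
* A sketch of how -d and -e rewrite the target name (zfs-receive(8) is
* authoritative): receiving a stream of poolA/fs/data@snap into "tank":
*   -d  ->  tank/fs/data@snap   (drop only the sending pool name)
*   -e  ->  tank/data@snap      (keep only the last path element)
*/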
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (abort_resumable) {
if (flags.isprefix || flags.istail || flags.dryrun ||
flags.resumable || flags.nomount) {
(void) fprintf(stderr, gettext("invalid option\n"));
usage(B_FALSE);
}
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(namebuf, sizeof (namebuf),
"%s/%%recv", argv[0]);
if (zfs_dataset_exists(g_zfs, namebuf,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) {
zfs_handle_t *zhp = zfs_open(g_zfs,
namebuf, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
nvlist_free(props);
return (1);
}
err = zfs_destroy(zhp, B_FALSE);
zfs_close(zhp);
} else {
zfs_handle_t *zhp = zfs_open(g_zfs,
argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
usage(B_FALSE);
if (!zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) ||
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == -1) {
(void) fprintf(stderr,
gettext("'%s' does not have any "
"resumable receive state to abort\n"),
argv[0]);
nvlist_free(props);
zfs_close(zhp);
return (1);
}
err = zfs_destroy(zhp, B_FALSE);
zfs_close(zhp);
}
nvlist_free(props);
return (err != 0);
}
if (isatty(STDIN_FILENO)) {
(void) fprintf(stderr,
gettext("Error: Backup stream can not be read "
"from a terminal.\n"
"You must redirect standard input.\n"));
nvlist_free(props);
return (1);
}
err = zfs_receive(g_zfs, argv[0], props, &flags, STDIN_FILENO, NULL);
nvlist_free(props);
return (err != 0);
}
/*
* allow/unallow stuff
*/
/* copied from zfs/sys/dsl_deleg.h */
#define ZFS_DELEG_PERM_CREATE "create"
#define ZFS_DELEG_PERM_DESTROY "destroy"
#define ZFS_DELEG_PERM_SNAPSHOT "snapshot"
#define ZFS_DELEG_PERM_ROLLBACK "rollback"
#define ZFS_DELEG_PERM_CLONE "clone"
#define ZFS_DELEG_PERM_PROMOTE "promote"
#define ZFS_DELEG_PERM_RENAME "rename"
#define ZFS_DELEG_PERM_MOUNT "mount"
#define ZFS_DELEG_PERM_SHARE "share"
#define ZFS_DELEG_PERM_SEND "send"
#define ZFS_DELEG_PERM_RECEIVE "receive"
#define ZFS_DELEG_PERM_ALLOW "allow"
#define ZFS_DELEG_PERM_USERPROP "userprop"
#define ZFS_DELEG_PERM_VSCAN "vscan" /* ??? */
#define ZFS_DELEG_PERM_USERQUOTA "userquota"
#define ZFS_DELEG_PERM_GROUPQUOTA "groupquota"
#define ZFS_DELEG_PERM_USERUSED "userused"
#define ZFS_DELEG_PERM_GROUPUSED "groupused"
#define ZFS_DELEG_PERM_USEROBJQUOTA "userobjquota"
#define ZFS_DELEG_PERM_GROUPOBJQUOTA "groupobjquota"
#define ZFS_DELEG_PERM_USEROBJUSED "userobjused"
#define ZFS_DELEG_PERM_GROUPOBJUSED "groupobjused"
#define ZFS_DELEG_PERM_HOLD "hold"
#define ZFS_DELEG_PERM_RELEASE "release"
#define ZFS_DELEG_PERM_DIFF "diff"
#define ZFS_DELEG_PERM_BOOKMARK "bookmark"
#define ZFS_DELEG_PERM_LOAD_KEY "load-key"
#define ZFS_DELEG_PERM_CHANGE_KEY "change-key"
#define ZFS_DELEG_PERM_PROJECTUSED "projectused"
#define ZFS_DELEG_PERM_PROJECTQUOTA "projectquota"
#define ZFS_DELEG_PERM_PROJECTOBJUSED "projectobjused"
#define ZFS_DELEG_PERM_PROJECTOBJQUOTA "projectobjquota"
#define ZFS_NUM_DELEG_NOTES ZFS_DELEG_NOTE_NONE
static zfs_deleg_perm_tab_t zfs_deleg_perm_tbl[] = {
{ ZFS_DELEG_PERM_ALLOW, ZFS_DELEG_NOTE_ALLOW },
{ ZFS_DELEG_PERM_CLONE, ZFS_DELEG_NOTE_CLONE },
{ ZFS_DELEG_PERM_CREATE, ZFS_DELEG_NOTE_CREATE },
{ ZFS_DELEG_PERM_DESTROY, ZFS_DELEG_NOTE_DESTROY },
{ ZFS_DELEG_PERM_DIFF, ZFS_DELEG_NOTE_DIFF},
{ ZFS_DELEG_PERM_HOLD, ZFS_DELEG_NOTE_HOLD },
{ ZFS_DELEG_PERM_MOUNT, ZFS_DELEG_NOTE_MOUNT },
{ ZFS_DELEG_PERM_PROMOTE, ZFS_DELEG_NOTE_PROMOTE },
{ ZFS_DELEG_PERM_RECEIVE, ZFS_DELEG_NOTE_RECEIVE },
{ ZFS_DELEG_PERM_RELEASE, ZFS_DELEG_NOTE_RELEASE },
{ ZFS_DELEG_PERM_RENAME, ZFS_DELEG_NOTE_RENAME },
{ ZFS_DELEG_PERM_ROLLBACK, ZFS_DELEG_NOTE_ROLLBACK },
{ ZFS_DELEG_PERM_SEND, ZFS_DELEG_NOTE_SEND },
{ ZFS_DELEG_PERM_SHARE, ZFS_DELEG_NOTE_SHARE },
{ ZFS_DELEG_PERM_SNAPSHOT, ZFS_DELEG_NOTE_SNAPSHOT },
{ ZFS_DELEG_PERM_BOOKMARK, ZFS_DELEG_NOTE_BOOKMARK },
{ ZFS_DELEG_PERM_LOAD_KEY, ZFS_DELEG_NOTE_LOAD_KEY },
{ ZFS_DELEG_PERM_CHANGE_KEY, ZFS_DELEG_NOTE_CHANGE_KEY },
{ ZFS_DELEG_PERM_GROUPQUOTA, ZFS_DELEG_NOTE_GROUPQUOTA },
{ ZFS_DELEG_PERM_GROUPUSED, ZFS_DELEG_NOTE_GROUPUSED },
{ ZFS_DELEG_PERM_USERPROP, ZFS_DELEG_NOTE_USERPROP },
{ ZFS_DELEG_PERM_USERQUOTA, ZFS_DELEG_NOTE_USERQUOTA },
{ ZFS_DELEG_PERM_USERUSED, ZFS_DELEG_NOTE_USERUSED },
{ ZFS_DELEG_PERM_USEROBJQUOTA, ZFS_DELEG_NOTE_USEROBJQUOTA },
{ ZFS_DELEG_PERM_USEROBJUSED, ZFS_DELEG_NOTE_USEROBJUSED },
{ ZFS_DELEG_PERM_GROUPOBJQUOTA, ZFS_DELEG_NOTE_GROUPOBJQUOTA },
{ ZFS_DELEG_PERM_GROUPOBJUSED, ZFS_DELEG_NOTE_GROUPOBJUSED },
{ ZFS_DELEG_PERM_PROJECTUSED, ZFS_DELEG_NOTE_PROJECTUSED },
{ ZFS_DELEG_PERM_PROJECTQUOTA, ZFS_DELEG_NOTE_PROJECTQUOTA },
{ ZFS_DELEG_PERM_PROJECTOBJUSED, ZFS_DELEG_NOTE_PROJECTOBJUSED },
{ ZFS_DELEG_PERM_PROJECTOBJQUOTA, ZFS_DELEG_NOTE_PROJECTOBJQUOTA },
{ NULL, ZFS_DELEG_NOTE_NONE }
};
/* permission structure */
typedef struct deleg_perm {
zfs_deleg_who_type_t dp_who_type;
const char *dp_name;
boolean_t dp_local;
boolean_t dp_descend;
} deleg_perm_t;
/* AVL node wrapping a single delegated permission */
typedef struct deleg_perm_node {
deleg_perm_t dpn_perm;
uu_avl_node_t dpn_avl_node;
} deleg_perm_node_t;
typedef struct fs_perm fs_perm_t;
/* permissions set */
typedef struct who_perm {
zfs_deleg_who_type_t who_type;
const char *who_name; /* id */
char who_ug_name[256]; /* user/group name */
fs_perm_t *who_fsperm; /* uplink */
uu_avl_t *who_deleg_perm_avl; /* permissions */
} who_perm_t;
/* AVL node wrapping the permissions granted to one "who" entry */
typedef struct who_perm_node {
who_perm_t who_perm;
uu_avl_node_t who_avl_node;
} who_perm_node_t;
typedef struct fs_perm_set fs_perm_set_t;
/* fs permissions */
struct fs_perm {
const char *fsp_name;
uu_avl_t *fsp_sc_avl; /* sets,create */
uu_avl_t *fsp_uge_avl; /* user,group,everyone */
fs_perm_set_t *fsp_set; /* uplink */
};
/* list node wrapping the delegated permissions of one file system */
typedef struct fs_perm_node {
fs_perm_t fspn_fsperm;
uu_avl_t *fspn_avl;
uu_list_node_t fspn_list_node;
} fs_perm_node_t;
/* top level structure */
struct fs_perm_set {
uu_list_pool_t *fsps_list_pool;
uu_list_t *fsps_list; /* list of fs_perms */
uu_avl_pool_t *fsps_named_set_avl_pool;
uu_avl_pool_t *fsps_who_perm_avl_pool;
uu_avl_pool_t *fsps_deleg_perm_avl_pool;
};
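/*
* A sketch of how these structures nest while printing delegated
* permissions:
*
*   fs_perm_set_t
*     fsps_list: fs_perm_node_t, one per file system
*       fsp_sc_avl:  who_perm_node_t for named sets and create-time perms
*       fsp_uge_avl: who_perm_node_t for user/group/everyone entries
*         who_deleg_perm_avl: deleg_perm_node_t, one per delegated
*         permission, carrying its local/descend flags
*/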
static inline const char *
deleg_perm_type(zfs_deleg_note_t note)
{
/* subcommands */
switch (note) {
/* SUBCOMMANDS */
/* OTHER */
case ZFS_DELEG_NOTE_GROUPQUOTA:
case ZFS_DELEG_NOTE_GROUPUSED:
case ZFS_DELEG_NOTE_USERPROP:
case ZFS_DELEG_NOTE_USERQUOTA:
case ZFS_DELEG_NOTE_USERUSED:
case ZFS_DELEG_NOTE_USEROBJQUOTA:
case ZFS_DELEG_NOTE_USEROBJUSED:
case ZFS_DELEG_NOTE_GROUPOBJQUOTA:
case ZFS_DELEG_NOTE_GROUPOBJUSED:
case ZFS_DELEG_NOTE_PROJECTUSED:
case ZFS_DELEG_NOTE_PROJECTQUOTA:
case ZFS_DELEG_NOTE_PROJECTOBJUSED:
case ZFS_DELEG_NOTE_PROJECTOBJQUOTA:
/* other */
return (gettext("other"));
default:
return (gettext("subcommand"));
}
}
static int
who_type2weight(zfs_deleg_who_type_t who_type)
{
int res;
switch (who_type) {
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
res = 0;
break;
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_CREATE:
res = 1;
break;
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
res = 2;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
res = 3;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
res = 4;
break;
default:
res = -1;
}
return (res);
}
/* ARGSUSED */
static int
who_perm_compare(const void *larg, const void *rarg, void *unused)
{
const who_perm_node_t *l = larg;
const who_perm_node_t *r = rarg;
zfs_deleg_who_type_t ltype = l->who_perm.who_type;
zfs_deleg_who_type_t rtype = r->who_perm.who_type;
int lweight = who_type2weight(ltype);
int rweight = who_type2weight(rtype);
int res = lweight - rweight;
if (res == 0)
res = strncmp(l->who_perm.who_name, r->who_perm.who_name,
ZFS_MAX_DELEG_NAME-1);
if (res == 0)
return (0);
if (res > 0)
return (1);
else
return (-1);
}
/* ARGSUSED */
static int
deleg_perm_compare(const void *larg, const void *rarg, void *unused)
{
const deleg_perm_node_t *l = larg;
const deleg_perm_node_t *r = rarg;
int res = strncmp(l->dpn_perm.dp_name, r->dpn_perm.dp_name,
ZFS_MAX_DELEG_NAME-1);
if (res == 0)
return (0);
if (res > 0)
return (1);
else
return (-1);
}
static inline void
fs_perm_set_init(fs_perm_set_t *fspset)
{
bzero(fspset, sizeof (fs_perm_set_t));
if ((fspset->fsps_list_pool = uu_list_pool_create("fsps_list_pool",
sizeof (fs_perm_node_t), offsetof(fs_perm_node_t, fspn_list_node),
NULL, UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_list = uu_list_create(fspset->fsps_list_pool, NULL,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_named_set_avl_pool = uu_avl_pool_create(
"named_set_avl_pool", sizeof (who_perm_node_t), offsetof(
who_perm_node_t, who_avl_node), who_perm_compare,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_who_perm_avl_pool = uu_avl_pool_create(
"who_perm_avl_pool", sizeof (who_perm_node_t), offsetof(
who_perm_node_t, who_avl_node), who_perm_compare,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_deleg_perm_avl_pool = uu_avl_pool_create(
"deleg_perm_avl_pool", sizeof (deleg_perm_node_t), offsetof(
deleg_perm_node_t, dpn_avl_node), deleg_perm_compare, UU_DEFAULT))
== NULL)
nomem();
}
static inline void fs_perm_fini(fs_perm_t *);
static inline void who_perm_fini(who_perm_t *);
static inline void
fs_perm_set_fini(fs_perm_set_t *fspset)
{
fs_perm_node_t *node = uu_list_first(fspset->fsps_list);
while (node != NULL) {
fs_perm_node_t *next_node =
uu_list_next(fspset->fsps_list, node);
fs_perm_t *fsperm = &node->fspn_fsperm;
fs_perm_fini(fsperm);
uu_list_remove(fspset->fsps_list, node);
free(node);
node = next_node;
}
uu_avl_pool_destroy(fspset->fsps_named_set_avl_pool);
uu_avl_pool_destroy(fspset->fsps_who_perm_avl_pool);
uu_avl_pool_destroy(fspset->fsps_deleg_perm_avl_pool);
}
static inline void
deleg_perm_init(deleg_perm_t *deleg_perm, zfs_deleg_who_type_t type,
const char *name)
{
deleg_perm->dp_who_type = type;
deleg_perm->dp_name = name;
}
static inline void
who_perm_init(who_perm_t *who_perm, fs_perm_t *fsperm,
zfs_deleg_who_type_t type, const char *name)
{
uu_avl_pool_t *pool;
pool = fsperm->fsp_set->fsps_deleg_perm_avl_pool;
bzero(who_perm, sizeof (who_perm_t));
if ((who_perm->who_deleg_perm_avl = uu_avl_create(pool, NULL,
UU_DEFAULT)) == NULL)
nomem();
who_perm->who_type = type;
who_perm->who_name = name;
who_perm->who_fsperm = fsperm;
}
static inline void
who_perm_fini(who_perm_t *who_perm)
{
deleg_perm_node_t *node = uu_avl_first(who_perm->who_deleg_perm_avl);
while (node != NULL) {
deleg_perm_node_t *next_node =
uu_avl_next(who_perm->who_deleg_perm_avl, node);
uu_avl_remove(who_perm->who_deleg_perm_avl, node);
free(node);
node = next_node;
}
uu_avl_destroy(who_perm->who_deleg_perm_avl);
}
static inline void
fs_perm_init(fs_perm_t *fsperm, fs_perm_set_t *fspset, const char *fsname)
{
uu_avl_pool_t *nset_pool = fspset->fsps_named_set_avl_pool;
uu_avl_pool_t *who_pool = fspset->fsps_who_perm_avl_pool;
bzero(fsperm, sizeof (fs_perm_t));
if ((fsperm->fsp_sc_avl = uu_avl_create(nset_pool, NULL, UU_DEFAULT))
== NULL)
nomem();
if ((fsperm->fsp_uge_avl = uu_avl_create(who_pool, NULL, UU_DEFAULT))
== NULL)
nomem();
fsperm->fsp_set = fspset;
fsperm->fsp_name = fsname;
}
static inline void
fs_perm_fini(fs_perm_t *fsperm)
{
who_perm_node_t *node = uu_avl_first(fsperm->fsp_sc_avl);
while (node != NULL) {
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_sc_avl,
node);
who_perm_t *who_perm = &node->who_perm;
who_perm_fini(who_perm);
uu_avl_remove(fsperm->fsp_sc_avl, node);
free(node);
node = next_node;
}
node = uu_avl_first(fsperm->fsp_uge_avl);
while (node != NULL) {
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_uge_avl,
node);
who_perm_t *who_perm = &node->who_perm;
who_perm_fini(who_perm);
uu_avl_remove(fsperm->fsp_uge_avl, node);
free(node);
node = next_node;
}
uu_avl_destroy(fsperm->fsp_sc_avl);
uu_avl_destroy(fsperm->fsp_uge_avl);
}
static void
set_deleg_perm_node(uu_avl_t *avl, deleg_perm_node_t *node,
zfs_deleg_who_type_t who_type, const char *name, char locality)
{
uu_avl_index_t idx = 0;
deleg_perm_node_t *found_node = NULL;
deleg_perm_t *deleg_perm = &node->dpn_perm;
deleg_perm_init(deleg_perm, who_type, name);
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
== NULL)
uu_avl_insert(avl, node, idx);
else {
node = found_node;
deleg_perm = &node->dpn_perm;
}
switch (locality) {
case ZFS_DELEG_LOCAL:
deleg_perm->dp_local = B_TRUE;
break;
case ZFS_DELEG_DESCENDENT:
deleg_perm->dp_descend = B_TRUE;
break;
case ZFS_DELEG_NA:
break;
default:
assert(B_FALSE); /* invalid locality */
}
}
static inline int
parse_who_perm(who_perm_t *who_perm, nvlist_t *nvl, char locality)
{
nvpair_t *nvp = NULL;
fs_perm_set_t *fspset = who_perm->who_fsperm->fsp_set;
uu_avl_t *avl = who_perm->who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_perm->who_type;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
data_type_t type = nvpair_type(nvp);
uu_avl_pool_t *avl_pool = fspset->fsps_deleg_perm_avl_pool;
deleg_perm_node_t *node =
safe_malloc(sizeof (deleg_perm_node_t));
VERIFY(type == DATA_TYPE_BOOLEAN);
uu_avl_node_init(node, &node->dpn_avl_node, avl_pool);
set_deleg_perm_node(avl, node, who_type, name, locality);
}
return (0);
}
static inline int
parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
{
nvpair_t *nvp = NULL;
fs_perm_set_t *fspset = fsperm->fsp_set;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
nvlist_t *nvl2 = NULL;
const char *name = nvpair_name(nvp);
uu_avl_t *avl = NULL;
uu_avl_pool_t *avl_pool = NULL;
zfs_deleg_who_type_t perm_type = name[0];
char perm_locality = name[1];
const char *perm_name = name + 3;
who_perm_t *who_perm = NULL;
assert('$' == name[2]);
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
return (-1);
switch (perm_type) {
case ZFS_DELEG_CREATE:
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_NAMED_SET:
case ZFS_DELEG_NAMED_SET_SETS:
avl_pool = fspset->fsps_named_set_avl_pool;
avl = fsperm->fsp_sc_avl;
break;
case ZFS_DELEG_USER:
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_GROUP:
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_EVERYONE:
case ZFS_DELEG_EVERYONE_SETS:
avl_pool = fspset->fsps_who_perm_avl_pool;
avl = fsperm->fsp_uge_avl;
break;
default:
assert(!"unhandled zfs_deleg_who_type_t");
}
who_perm_node_t *found_node = NULL;
who_perm_node_t *node = safe_malloc(
sizeof (who_perm_node_t));
who_perm = &node->who_perm;
uu_avl_index_t idx = 0;
uu_avl_node_init(node, &node->who_avl_node, avl_pool);
who_perm_init(who_perm, fsperm, perm_type, perm_name);
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
== NULL) {
if (avl == fsperm->fsp_uge_avl) {
uid_t rid = 0;
struct passwd *p = NULL;
struct group *g = NULL;
const char *nice_name = NULL;
switch (perm_type) {
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
rid = atoi(perm_name);
p = getpwuid(rid);
if (p)
nice_name = p->pw_name;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
rid = atoi(perm_name);
g = getgrgid(rid);
if (g)
nice_name = g->gr_name;
break;
default:
break;
}
if (nice_name != NULL) {
(void) strlcpy(
node->who_perm.who_ug_name,
nice_name, 256);
} else {
/* User or group unknown */
(void) snprintf(
node->who_perm.who_ug_name,
sizeof (node->who_perm.who_ug_name),
"(unknown: %d)", rid);
}
}
uu_avl_insert(avl, node, idx);
} else {
node = found_node;
who_perm = &node->who_perm;
}
assert(who_perm != NULL);
(void) parse_who_perm(who_perm, nvl2, perm_locality);
}
return (0);
}
static inline int
parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl)
{
nvpair_t *nvp = NULL;
uu_avl_index_t idx = 0;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
nvlist_t *nvl2 = NULL;
const char *fsname = nvpair_name(nvp);
data_type_t type = nvpair_type(nvp);
fs_perm_t *fsperm = NULL;
fs_perm_node_t *node = safe_malloc(sizeof (fs_perm_node_t));
if (node == NULL)
nomem();
fsperm = &node->fspn_fsperm;
VERIFY(DATA_TYPE_NVLIST == type);
uu_list_node_init(node, &node->fspn_list_node,
fspset->fsps_list_pool);
idx = uu_list_numnodes(fspset->fsps_list);
fs_perm_init(fsperm, fspset, fsname);
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
return (-1);
(void) parse_fs_perm(fsperm, nvl2);
uu_list_insert(fspset->fsps_list, node, idx);
}
return (0);
}
static inline const char *
deleg_perm_comment(zfs_deleg_note_t note)
{
const char *str = "";
/* subcommands */
switch (note) {
/* SUBCOMMANDS */
case ZFS_DELEG_NOTE_ALLOW:
str = gettext("Must also have the permission that is being"
"\n\t\t\t\tallowed");
break;
case ZFS_DELEG_NOTE_CLONE:
str = gettext("Must also have the 'create' ability and 'mount'"
"\n\t\t\t\tability in the origin file system");
break;
case ZFS_DELEG_NOTE_CREATE:
str = gettext("Must also have the 'mount' ability");
break;
case ZFS_DELEG_NOTE_DESTROY:
str = gettext("Must also have the 'mount' ability");
break;
case ZFS_DELEG_NOTE_DIFF:
str = gettext("Allows lookup of paths within a dataset;"
"\n\t\t\t\tgiven an object number. Ordinary users need this"
"\n\t\t\t\tin order to use zfs diff");
break;
case ZFS_DELEG_NOTE_HOLD:
str = gettext("Allows adding a user hold to a snapshot");
break;
case ZFS_DELEG_NOTE_MOUNT:
str = gettext("Allows mount/umount of ZFS datasets");
break;
case ZFS_DELEG_NOTE_PROMOTE:
str = gettext("Must also have the 'mount'\n\t\t\t\tand"
" 'promote' ability in the origin file system");
break;
case ZFS_DELEG_NOTE_RECEIVE:
str = gettext("Must also have the 'mount' and 'create'"
" ability");
break;
case ZFS_DELEG_NOTE_RELEASE:
str = gettext("Allows releasing a user hold which\n\t\t\t\t"
"might destroy the snapshot");
break;
case ZFS_DELEG_NOTE_RENAME:
str = gettext("Must also have the 'mount' and 'create'"
"\n\t\t\t\tability in the new parent");
break;
case ZFS_DELEG_NOTE_ROLLBACK:
str = gettext("");
break;
case ZFS_DELEG_NOTE_SEND:
str = gettext("");
break;
case ZFS_DELEG_NOTE_SHARE:
str = gettext("Allows sharing file systems over NFS or SMB"
"\n\t\t\t\tprotocols");
break;
case ZFS_DELEG_NOTE_SNAPSHOT:
str = gettext("");
break;
case ZFS_DELEG_NOTE_LOAD_KEY:
str = gettext("Allows loading or unloading an encryption key");
break;
case ZFS_DELEG_NOTE_CHANGE_KEY:
str = gettext("Allows changing or adding an encryption key");
break;
/*
* case ZFS_DELEG_NOTE_VSCAN:
* str = gettext("");
* break;
*/
/* OTHER */
case ZFS_DELEG_NOTE_GROUPQUOTA:
str = gettext("Allows accessing any groupquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPUSED:
str = gettext("Allows reading any groupused@... property");
break;
case ZFS_DELEG_NOTE_USERPROP:
str = gettext("Allows changing any user property");
break;
case ZFS_DELEG_NOTE_USERQUOTA:
str = gettext("Allows accessing any userquota@... property");
break;
case ZFS_DELEG_NOTE_USERUSED:
str = gettext("Allows reading any userused@... property");
break;
case ZFS_DELEG_NOTE_USEROBJQUOTA:
str = gettext("Allows accessing any userobjquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPOBJQUOTA:
str = gettext("Allows accessing any \n\t\t\t\t"
"groupobjquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPOBJUSED:
str = gettext("Allows reading any groupobjused@... property");
break;
case ZFS_DELEG_NOTE_USEROBJUSED:
str = gettext("Allows reading any userobjused@... property");
break;
case ZFS_DELEG_NOTE_PROJECTQUOTA:
str = gettext("Allows accessing any projectquota@... property");
break;
case ZFS_DELEG_NOTE_PROJECTOBJQUOTA:
str = gettext("Allows accessing any \n\t\t\t\t"
"projectobjquota@... property");
break;
case ZFS_DELEG_NOTE_PROJECTUSED:
str = gettext("Allows reading any projectused@... property");
break;
case ZFS_DELEG_NOTE_PROJECTOBJUSED:
str = gettext("Allows accessing any \n\t\t\t\t"
"projectobjused@... property");
break;
/* other */
default:
str = "";
}
return (str);
}
struct allow_opts {
boolean_t local;
boolean_t descend;
boolean_t user;
boolean_t group;
boolean_t everyone;
boolean_t create;
boolean_t set;
boolean_t recursive; /* unallow only */
boolean_t prt_usage;
boolean_t prt_perms;
char *who;
char *perms;
const char *dataset;
};
static inline int
prop_cmp(const void *a, const void *b)
{
const char *str1 = *(const char **)a;
const char *str2 = *(const char **)b;
return (strcmp(str1, str2));
}
static void
allow_usage(boolean_t un, boolean_t requested, const char *msg)
{
const char *opt_desc[] = {
"-h", gettext("show this help message and exit"),
"-l", gettext("set permission locally"),
"-d", gettext("set permission for descents"),
"-u", gettext("set permission for user"),
"-g", gettext("set permission for group"),
"-e", gettext("set permission for everyone"),
"-c", gettext("set create time permission"),
"-s", gettext("define permission set"),
/* unallow only */
"-r", gettext("remove permissions recursively"),
};
size_t unallow_size = sizeof (opt_desc) / sizeof (char *);
size_t allow_size = unallow_size - 2;
const char *props[ZFS_NUM_PROPS];
int i;
size_t count = 0;
FILE *fp = requested ? stdout : stderr;
zprop_desc_t *pdtbl = zfs_prop_get_table();
const char *fmt = gettext("%-16s %-14s\t%s\n");
(void) fprintf(fp, gettext("Usage: %s\n"), get_usage(un ? HELP_UNALLOW :
HELP_ALLOW));
(void) fprintf(fp, gettext("Options:\n"));
for (i = 0; i < (un ? unallow_size : allow_size); i += 2) {
const char *opt = opt_desc[i];
const char *optdsc = opt_desc[i + 1];
(void) fprintf(fp, gettext(" %-10s %s\n"), opt, optdsc);
}
(void) fprintf(fp, gettext("\nThe following permissions are "
"supported:\n\n"));
(void) fprintf(fp, fmt, gettext("NAME"), gettext("TYPE"),
gettext("NOTES"));
for (i = 0; i < ZFS_NUM_DELEG_NOTES; i++) {
const char *perm_name = zfs_deleg_perm_tbl[i].z_perm;
zfs_deleg_note_t perm_note = zfs_deleg_perm_tbl[i].z_note;
const char *perm_type = deleg_perm_type(perm_note);
const char *perm_comment = deleg_perm_comment(perm_note);
(void) fprintf(fp, fmt, perm_name, perm_type, perm_comment);
}
for (i = 0; i < ZFS_NUM_PROPS; i++) {
zprop_desc_t *pd = &pdtbl[i];
if (pd->pd_visible != B_TRUE)
continue;
if (pd->pd_attr == PROP_READONLY)
continue;
props[count++] = pd->pd_name;
}
props[count] = NULL;
qsort(props, count, sizeof (char *), prop_cmp);
for (i = 0; i < count; i++)
(void) fprintf(fp, fmt, props[i], gettext("property"), "");
if (msg != NULL)
(void) fprintf(fp, gettext("\nzfs: error: %s"), msg);
exit(requested ? 0 : 2);
}
static inline const char *
munge_args(int argc, char **argv, boolean_t un, size_t expected_argc,
char **permsp)
{
if (un && argc == expected_argc - 1)
*permsp = NULL;
else if (argc == expected_argc)
*permsp = argv[argc - 2];
else
allow_usage(un, B_FALSE,
gettext("wrong number of parameters\n"));
return (argv[argc - 1]);
}
static void
parse_allow_args(int argc, char **argv, boolean_t un, struct allow_opts *opts)
{
int uge_sum = opts->user + opts->group + opts->everyone;
int csuge_sum = opts->create + opts->set + uge_sum;
int ldcsuge_sum = csuge_sum + opts->local + opts->descend;
int all_sum = un ? ldcsuge_sum + opts->recursive : ldcsuge_sum;
if (uge_sum > 1)
allow_usage(un, B_FALSE,
gettext("-u, -g, and -e are mutually exclusive\n"));
if (opts->prt_usage) {
if (argc == 0 && all_sum == 0)
allow_usage(un, B_TRUE, NULL);
else
usage(B_FALSE);
}
if (opts->set) {
if (csuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -s\n"));
opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
if (argv[0][0] != '@')
allow_usage(un, B_FALSE,
gettext("invalid set name: missing '@' prefix\n"));
opts->who = argv[0];
} else if (opts->create) {
if (ldcsuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -c\n"));
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (opts->everyone) {
if (csuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -e\n"));
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (uge_sum == 0 && argc > 0 && strcmp(argv[0], "everyone")
== 0) {
opts->everyone = B_TRUE;
argc--;
argv++;
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (argc == 1 && !un) {
opts->prt_perms = B_TRUE;
opts->dataset = argv[argc-1];
} else {
opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
opts->who = argv[0];
}
if (!opts->local && !opts->descend) {
opts->local = B_TRUE;
opts->descend = B_TRUE;
}
}
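/*
* Illustrative command forms accepted by this parser (a sketch;
* zfs-allow(8) is authoritative):
*
*   zfs allow tank/home                        print current permissions
*   zfs allow alice snapshot,mount tank/home   user or group looked up by name
*   zfs allow -g staff send tank/home          group explicitly
*   zfs allow -c create,mount tank/home        create-time permissions
*   zfs allow -s @backup send,snapshot tank    define a permission set
*   zfs unallow -r alice tank/home             remove alice's permissions
*/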
static void
store_allow_perm(zfs_deleg_who_type_t type, boolean_t local, boolean_t descend,
const char *who, char *perms, nvlist_t *top_nvl)
{
int i;
char ld[2] = { '\0', '\0' };
char who_buf[MAXNAMELEN + 32];
char base_type = '\0';
char set_type = '\0';
nvlist_t *base_nvl = NULL;
nvlist_t *set_nvl = NULL;
nvlist_t *nvl;
if (nvlist_alloc(&base_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (nvlist_alloc(&set_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
switch (type) {
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
set_type = ZFS_DELEG_NAMED_SET_SETS;
base_type = ZFS_DELEG_NAMED_SET;
ld[0] = ZFS_DELEG_NA;
break;
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_CREATE:
set_type = ZFS_DELEG_CREATE_SETS;
base_type = ZFS_DELEG_CREATE;
ld[0] = ZFS_DELEG_NA;
break;
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
set_type = ZFS_DELEG_USER_SETS;
base_type = ZFS_DELEG_USER;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
set_type = ZFS_DELEG_GROUP_SETS;
base_type = ZFS_DELEG_GROUP;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
set_type = ZFS_DELEG_EVERYONE_SETS;
base_type = ZFS_DELEG_EVERYONE;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
default:
assert(set_type != '\0' && base_type != '\0');
}
if (perms != NULL) {
char *curr = perms;
char *end = curr + strlen(perms);
while (curr < end) {
char *delim = strchr(curr, ',');
if (delim == NULL)
delim = end;
else
*delim = '\0';
if (curr[0] == '@')
nvl = set_nvl;
else
nvl = base_nvl;
(void) nvlist_add_boolean(nvl, curr);
if (delim != end)
*delim = ',';
curr = delim + 1;
}
for (i = 0; i < 2; i++) {
char locality = ld[i];
if (locality == 0)
continue;
if (!nvlist_empty(base_nvl)) {
if (who != NULL)
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$%s",
base_type, locality, who);
else
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$",
base_type, locality);
(void) nvlist_add_nvlist(top_nvl, who_buf,
base_nvl);
}
if (!nvlist_empty(set_nvl)) {
if (who != NULL)
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$%s",
set_type, locality, who);
else
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$",
set_type, locality);
(void) nvlist_add_nvlist(top_nvl, who_buf,
set_nvl);
}
}
} else {
for (i = 0; i < 2; i++) {
char locality = ld[i];
if (locality == 0)
continue;
if (who != NULL)
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$%s", base_type, locality, who);
else
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$", base_type, locality);
(void) nvlist_add_boolean(top_nvl, who_buf);
if (who != NULL)
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$%s", set_type, locality, who);
else
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$", set_type, locality);
(void) nvlist_add_boolean(top_nvl, who_buf);
}
}
}
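/*
* The nvlist keys built above use the fsacl naming scheme
* "<who-type><locality>$[<who>]" that parse_fs_perm() decodes. A sketch,
* assuming the conventional single-character codes ('u' user, 'g' group,
* 'e' everyone, 'l' local, 'd' descendent):
*
*   "ul$1001"  local permissions for uid 1001
*   "gd$100"   descendent permissions for gid 100
*   "el$"      local permissions for everyone
*
* Each key maps to an nvlist of boolean entries naming the permissions
* themselves, e.g. "snapshot", "mount", or a set name such as "@backup".
*/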
static int
construct_fsacl_list(boolean_t un, struct allow_opts *opts, nvlist_t **nvlp)
{
if (nvlist_alloc(nvlp, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (opts->set) {
store_allow_perm(ZFS_DELEG_NAMED_SET, opts->local,
opts->descend, opts->who, opts->perms, *nvlp);
} else if (opts->create) {
store_allow_perm(ZFS_DELEG_CREATE, opts->local,
opts->descend, NULL, opts->perms, *nvlp);
} else if (opts->everyone) {
store_allow_perm(ZFS_DELEG_EVERYONE, opts->local,
opts->descend, NULL, opts->perms, *nvlp);
} else {
char *curr = opts->who;
char *end = curr + strlen(curr);
while (curr < end) {
const char *who;
zfs_deleg_who_type_t who_type = ZFS_DELEG_WHO_UNKNOWN;
char *endch;
char *delim = strchr(curr, ',');
char errbuf[256];
char id[64];
struct passwd *p = NULL;
struct group *g = NULL;
uid_t rid;
if (delim == NULL)
delim = end;
else
*delim = '\0';
rid = (uid_t)strtol(curr, &endch, 0);
if (opts->user) {
who_type = ZFS_DELEG_USER;
if (*endch != '\0')
p = getpwnam(curr);
else
p = getpwuid(rid);
if (p != NULL)
rid = p->pw_uid;
else if (*endch != '\0') {
(void) snprintf(errbuf, 256, gettext(
"invalid user %s\n"), curr);
allow_usage(un, B_TRUE, errbuf);
}
} else if (opts->group) {
who_type = ZFS_DELEG_GROUP;
if (*endch != '\0')
g = getgrnam(curr);
else
g = getgrgid(rid);
if (g != NULL)
rid = g->gr_gid;
else if (*endch != '\0') {
(void) snprintf(errbuf, 256, gettext(
"invalid group %s\n"), curr);
allow_usage(un, B_TRUE, errbuf);
}
} else {
if (*endch != '\0') {
p = getpwnam(curr);
} else {
p = getpwuid(rid);
}
if (p == NULL) {
if (*endch != '\0') {
g = getgrnam(curr);
} else {
g = getgrgid(rid);
}
}
if (p != NULL) {
who_type = ZFS_DELEG_USER;
rid = p->pw_uid;
} else if (g != NULL) {
who_type = ZFS_DELEG_GROUP;
rid = g->gr_gid;
} else {
(void) snprintf(errbuf, 256, gettext(
"invalid user/group %s\n"), curr);
allow_usage(un, B_TRUE, errbuf);
}
}
(void) sprintf(id, "%u", rid);
who = id;
store_allow_perm(who_type, opts->local,
opts->descend, who, opts->perms, *nvlp);
curr = delim + 1;
}
}
return (0);
}
static void
print_set_creat_perms(uu_avl_t *who_avl)
{
const char *sc_title[] = {
gettext("Permission sets:\n"),
gettext("Create time permissions:\n"),
NULL
};
who_perm_node_t *who_node = NULL;
int prev_weight = -1;
for (who_node = uu_avl_first(who_avl); who_node != NULL;
who_node = uu_avl_next(who_avl, who_node)) {
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
const char *who_name = who_node->who_perm.who_name;
int weight = who_type2weight(who_type);
boolean_t first = B_TRUE;
deleg_perm_node_t *deleg_node;
if (prev_weight != weight) {
(void) printf("%s", sc_title[weight]);
prev_weight = weight;
}
if (who_name == NULL || strnlen(who_name, 1) == 0)
(void) printf("\t");
else
(void) printf("\t%s ", who_name);
for (deleg_node = uu_avl_first(avl); deleg_node != NULL;
deleg_node = uu_avl_next(avl, deleg_node)) {
if (first) {
(void) printf("%s",
deleg_node->dpn_perm.dp_name);
first = B_FALSE;
} else
(void) printf(",%s",
deleg_node->dpn_perm.dp_name);
}
(void) printf("\n");
}
}
static void
print_uge_deleg_perms(uu_avl_t *who_avl, boolean_t local, boolean_t descend,
const char *title)
{
who_perm_node_t *who_node = NULL;
boolean_t prt_title = B_TRUE;
uu_avl_walk_t *walk;
if ((walk = uu_avl_walk_start(who_avl, UU_WALK_ROBUST)) == NULL)
nomem();
while ((who_node = uu_avl_walk_next(walk)) != NULL) {
const char *who_name = who_node->who_perm.who_name;
const char *nice_who_name = who_node->who_perm.who_ug_name;
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
char delim = ' ';
deleg_perm_node_t *deleg_node;
boolean_t prt_who = B_TRUE;
for (deleg_node = uu_avl_first(avl);
deleg_node != NULL;
deleg_node = uu_avl_next(avl, deleg_node)) {
if (local != deleg_node->dpn_perm.dp_local ||
descend != deleg_node->dpn_perm.dp_descend)
continue;
if (prt_who) {
const char *who = NULL;
if (prt_title) {
prt_title = B_FALSE;
(void) printf("%s", title);
}
switch (who_type) {
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
who = gettext("user");
if (nice_who_name)
who_name = nice_who_name;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
who = gettext("group");
if (nice_who_name)
who_name = nice_who_name;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
who = gettext("everyone");
who_name = NULL;
break;
default:
assert(who != NULL);
}
prt_who = B_FALSE;
if (who_name == NULL)
(void) printf("\t%s", who);
else
(void) printf("\t%s %s", who, who_name);
}
(void) printf("%c%s", delim,
deleg_node->dpn_perm.dp_name);
delim = ',';
}
if (!prt_who)
(void) printf("\n");
}
uu_avl_walk_end(walk);
}
static void
print_fs_perms(fs_perm_set_t *fspset)
{
fs_perm_node_t *node = NULL;
char buf[MAXNAMELEN + 32];
const char *dsname = buf;
for (node = uu_list_first(fspset->fsps_list); node != NULL;
node = uu_list_next(fspset->fsps_list, node)) {
uu_avl_t *sc_avl = node->fspn_fsperm.fsp_sc_avl;
uu_avl_t *uge_avl = node->fspn_fsperm.fsp_uge_avl;
int left = 0;
(void) snprintf(buf, sizeof (buf),
gettext("---- Permissions on %s "),
node->fspn_fsperm.fsp_name);
(void) printf("%s", dsname);
left = 70 - strlen(buf);
while (left-- > 0)
(void) printf("-");
(void) printf("\n");
print_set_creat_perms(sc_avl);
print_uge_deleg_perms(uge_avl, B_TRUE, B_FALSE,
gettext("Local permissions:\n"));
print_uge_deleg_perms(uge_avl, B_FALSE, B_TRUE,
gettext("Descendent permissions:\n"));
print_uge_deleg_perms(uge_avl, B_TRUE, B_TRUE,
gettext("Local+Descendent permissions:\n"));
}
}
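/*
* A sketch of the resulting 'zfs allow <dataset>' output:
*
*   ---- Permissions on tank/home ---------------------------------------
*   Permission sets:
*           @backup send,snapshot
*   Local+Descendent permissions:
*           user alice mount,snapshot
*/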
static fs_perm_set_t fs_perm_set = { NULL, NULL, NULL, NULL };
struct deleg_perms {
boolean_t un;
nvlist_t *nvl;
};
static int
set_deleg_perms(zfs_handle_t *zhp, void *data)
{
struct deleg_perms *perms = (struct deleg_perms *)data;
zfs_type_t zfs_type = zfs_get_type(zhp);
if (zfs_type != ZFS_TYPE_FILESYSTEM && zfs_type != ZFS_TYPE_VOLUME)
return (0);
return (zfs_set_fsacl(zhp, perms->un, perms->nvl));
}
static int
zfs_do_allow_unallow_impl(int argc, char **argv, boolean_t un)
{
zfs_handle_t *zhp;
nvlist_t *perm_nvl = NULL;
nvlist_t *update_perm_nvl = NULL;
int error = 1;
int c;
struct allow_opts opts = { 0 };
const char *optstr = un ? "ldugecsrh" : "ldugecsh";
/* check opts */
while ((c = getopt(argc, argv, optstr)) != -1) {
switch (c) {
case 'l':
opts.local = B_TRUE;
break;
case 'd':
opts.descend = B_TRUE;
break;
case 'u':
opts.user = B_TRUE;
break;
case 'g':
opts.group = B_TRUE;
break;
case 'e':
opts.everyone = B_TRUE;
break;
case 's':
opts.set = B_TRUE;
break;
case 'c':
opts.create = B_TRUE;
break;
case 'r':
opts.recursive = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case 'h':
opts.prt_usage = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
parse_allow_args(argc, argv, un, &opts);
/* try to open the dataset */
if ((zhp = zfs_open(g_zfs, opts.dataset, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
(void) fprintf(stderr, "Failed to open dataset: %s\n",
opts.dataset);
return (-1);
}
if (zfs_get_fsacl(zhp, &perm_nvl) != 0)
goto cleanup2;
fs_perm_set_init(&fs_perm_set);
if (parse_fs_perm_set(&fs_perm_set, perm_nvl) != 0) {
(void) fprintf(stderr, "Failed to parse fsacl permissions\n");
goto cleanup1;
}
if (opts.prt_perms)
print_fs_perms(&fs_perm_set);
else {
(void) construct_fsacl_list(un, &opts, &update_perm_nvl);
if (zfs_set_fsacl(zhp, un, update_perm_nvl) != 0)
goto cleanup0;
if (un && opts.recursive) {
struct deleg_perms data = { un, update_perm_nvl };
if (zfs_iter_filesystems(zhp, set_deleg_perms,
&data) != 0)
goto cleanup0;
}
}
error = 0;
cleanup0:
nvlist_free(perm_nvl);
nvlist_free(update_perm_nvl);
cleanup1:
fs_perm_set_fini(&fs_perm_set);
cleanup2:
zfs_close(zhp);
return (error);
}
static int
zfs_do_allow(int argc, char **argv)
{
return (zfs_do_allow_unallow_impl(argc, argv, B_FALSE));
}
static int
zfs_do_unallow(int argc, char **argv)
{
return (zfs_do_allow_unallow_impl(argc, argv, B_TRUE));
}
static int
zfs_do_hold_rele_impl(int argc, char **argv, boolean_t holding)
{
int errors = 0;
int i;
const char *tag;
boolean_t recursive = B_FALSE;
const char *opts = holding ? "rt" : "r";
int c;
/* check options */
while ((c = getopt(argc, argv, opts)) != -1) {
switch (c) {
case 'r':
recursive = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 2)
usage(B_FALSE);
tag = argv[0];
--argc;
++argv;
if (holding && tag[0] == '.') {
/* tags starting with '.' are reserved for libzfs */
(void) fprintf(stderr, gettext("tag may not start with '.'\n"));
usage(B_FALSE);
}
for (i = 0; i < argc; ++i) {
zfs_handle_t *zhp;
char parent[ZFS_MAX_DATASET_NAME_LEN];
const char *delim;
char *path = argv[i];
delim = strchr(path, '@');
if (delim == NULL) {
(void) fprintf(stderr,
gettext("'%s' is not a snapshot\n"), path);
++errors;
continue;
}
(void) strncpy(parent, path, delim - path);
parent[delim - path] = '\0';
zhp = zfs_open(g_zfs, parent,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
++errors;
continue;
}
if (holding) {
if (zfs_hold(zhp, delim+1, tag, recursive, -1) != 0)
++errors;
} else {
if (zfs_release(zhp, delim+1, tag, recursive) != 0)
++errors;
}
zfs_close(zhp);
}
return (errors != 0);
}
/*
* zfs hold [-r] [-t] <tag> <snap> ...
*
* -r Recursively hold
*
* Apply a user-hold with the given tag to the list of snapshots.
*/
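/*
* For example (a sketch):
*   zfs hold -r keep tank/home@2024-01-01
* places the user hold "keep" on that snapshot and on the same-named
* snapshot of every descendent dataset, blocking 'zfs destroy' until the
* hold is released.
*/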
static int
zfs_do_hold(int argc, char **argv)
{
return (zfs_do_hold_rele_impl(argc, argv, B_TRUE));
}
/*
* zfs release [-r] <tag> <snap> ...
*
* -r Recursively release
*
* Release a user-hold with the given tag from the list of snapshots.
*/
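/*
* For example (a sketch):
*   zfs release -r keep tank/home@2024-01-01
* removes the holds placed by the matching 'zfs hold -r keep ...' above.
*/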
static int
zfs_do_release(int argc, char **argv)
{
return (zfs_do_hold_rele_impl(argc, argv, B_FALSE));
}
typedef struct holds_cbdata {
boolean_t cb_recursive;
const char *cb_snapname;
nvlist_t **cb_nvlp;
size_t cb_max_namelen;
size_t cb_max_taglen;
} holds_cbdata_t;
#define STRFTIME_FMT_STR "%a %b %e %H:%M %Y"
#define DATETIME_BUF_LEN (32)
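/* Example rendering of STRFTIME_FMT_STR (a sketch): "Mon Feb  5 14:07 2024" */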
/*
* Print the collected holds, either tab-separated (scripted mode) or as
* an aligned table under NAME/TAG/TIMESTAMP headers.
*/
static void
print_holds(boolean_t scripted, int nwidth, int tagwidth, nvlist_t *nvl)
{
int i;
nvpair_t *nvp = NULL;
char *hdr_cols[] = { "NAME", "TAG", "TIMESTAMP" };
const char *col;
if (!scripted) {
for (i = 0; i < 3; i++) {
col = gettext(hdr_cols[i]);
if (i < 2)
(void) printf("%-*s ", i ? tagwidth : nwidth,
col);
else
(void) printf("%s\n", col);
}
}
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
char *zname = nvpair_name(nvp);
nvlist_t *nvl2;
nvpair_t *nvp2 = NULL;
(void) nvpair_value_nvlist(nvp, &nvl2);
while ((nvp2 = nvlist_next_nvpair(nvl2, nvp2)) != NULL) {
char tsbuf[DATETIME_BUF_LEN];
char *tagname = nvpair_name(nvp2);
uint64_t val = 0;
time_t time;
struct tm t;
(void) nvpair_value_uint64(nvp2, &val);
time = (time_t)val;
(void) localtime_r(&time, &t);
(void) strftime(tsbuf, DATETIME_BUF_LEN,
gettext(STRFTIME_FMT_STR), &t);
if (scripted) {
(void) printf("%s\t%s\t%s\n", zname,
tagname, tsbuf);
} else {
(void) printf("%-*s %-*s %s\n", nwidth,
zname, tagwidth, tagname, tsbuf);
}
}
}
}
/*
* Callback that collects the user holds on a dataset or snapshot into
* the caller's nvlist and tracks the column widths needed for display.
*/
static int
holds_callback(zfs_handle_t *zhp, void *data)
{
holds_cbdata_t *cbp = data;
nvlist_t *top_nvl = *cbp->cb_nvlp;
nvlist_t *nvl = NULL;
nvpair_t *nvp = NULL;
const char *zname = zfs_get_name(zhp);
size_t znamelen = strlen(zname);
if (cbp->cb_recursive) {
const char *snapname;
char *delim = strchr(zname, '@');
if (delim == NULL)
return (0);
snapname = delim + 1;
if (strcmp(cbp->cb_snapname, snapname))
return (0);
}
if (zfs_get_holds(zhp, &nvl) != 0)
return (-1);
if (znamelen > cbp->cb_max_namelen)
cbp->cb_max_namelen = znamelen;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *tag = nvpair_name(nvp);
size_t taglen = strlen(tag);
if (taglen > cbp->cb_max_taglen)
cbp->cb_max_taglen = taglen;
}
return (nvlist_add_nvlist(top_nvl, zname, nvl));
}
/*
* zfs holds [-rH] <snap> ...
*
* -r Lists holds that are set on the named snapshots recursively.
* -H Scripted mode; elide headers and separate columns by tabs.
*/
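/*
* Illustrative usage and output (a sketch):
*
*   $ zfs holds -r tank/home@2024-01-01
*   NAME                        TAG   TIMESTAMP
*   tank/home@2024-01-01        keep  Mon Feb  5 14:07 2024
*   tank/home/alice@2024-01-01  keep  Mon Feb  5 14:07 2024
*/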
static int
zfs_do_holds(int argc, char **argv)
{
int errors = 0;
int c;
int i;
boolean_t scripted = B_FALSE;
boolean_t recursive = B_FALSE;
const char *opts = "rH";
nvlist_t *nvl;
int types = ZFS_TYPE_SNAPSHOT;
holds_cbdata_t cb = { 0 };
int limit = 0;
int ret = 0;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, opts)) != -1) {
switch (c) {
case 'r':
recursive = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (recursive) {
types |= ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
flags |= ZFS_ITER_RECURSE;
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1)
usage(B_FALSE);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
for (i = 0; i < argc; ++i) {
char *snapshot = argv[i];
const char *delim;
const char *snapname;
delim = strchr(snapshot, '@');
if (delim == NULL) {
(void) fprintf(stderr,
gettext("'%s' is not a snapshot\n"), snapshot);
++errors;
continue;
}
snapname = delim + 1;
if (recursive)
snapshot[delim - snapshot] = '\0';
cb.cb_recursive = recursive;
cb.cb_snapname = snapname;
cb.cb_nvlp = &nvl;
/*
* 1. collect holds data, set format options
*/
ret = zfs_for_each(argc, argv, flags, types, NULL, NULL, limit,
holds_callback, &cb);
if (ret != 0)
++errors;
}
/*
* 2. print holds data
*/
print_holds(scripted, cb.cb_max_namelen, cb.cb_max_taglen, nvl);
if (nvlist_empty(nvl))
(void) fprintf(stderr, gettext("no datasets available\n"));
nvlist_free(nvl);
return (0 != errors);
}
#define CHECK_SPINNER 30
#define SPINNER_TIME 3 /* seconds */
#define MOUNT_TIME 1 /* seconds */
typedef struct get_all_state {
boolean_t ga_verbose;
get_all_cb_t *ga_cbp;
} get_all_state_t;
static int
get_one_dataset(zfs_handle_t *zhp, void *data)
{
static char *spin[] = { "-", "\\", "|", "/" };
static int spinval = 0;
static int spincheck = 0;
static time_t last_spin_time = (time_t)0;
get_all_state_t *state = data;
zfs_type_t type = zfs_get_type(zhp);
if (state->ga_verbose) {
if (--spincheck < 0) {
time_t now = time(NULL);
if (last_spin_time + SPINNER_TIME < now) {
update_progress(spin[spinval++ % 4]);
last_spin_time = now;
}
spincheck = CHECK_SPINNER;
}
}
/*
* Iterate over any nested datasets.
*/
if (zfs_iter_filesystems(zhp, get_one_dataset, data) != 0) {
zfs_close(zhp);
return (1);
}
/*
* Skip any datasets whose type does not match.
*/
if ((type & ZFS_TYPE_FILESYSTEM) == 0) {
zfs_close(zhp);
return (0);
}
libzfs_add_handle(state->ga_cbp, zhp);
assert(state->ga_cbp->cb_used <= state->ga_cbp->cb_alloc);
return (0);
}
static void
get_all_datasets(get_all_cb_t *cbp, boolean_t verbose)
{
get_all_state_t state = {
.ga_verbose = verbose,
.ga_cbp = cbp
};
if (verbose)
set_progress_header(gettext("Reading ZFS config"));
(void) zfs_iter_root(g_zfs, get_one_dataset, &state);
if (verbose)
finish_progress(gettext("done."));
}
/*
* Generic callback for sharing or mounting filesystems. Because the code is so
* similar, we have a common function with an extra parameter to determine which
* mode we are using.
*/
typedef enum { OP_SHARE, OP_MOUNT } share_mount_op_t;
typedef struct share_mount_state {
share_mount_op_t sm_op;
boolean_t sm_verbose;
int sm_flags;
char *sm_options;
char *sm_proto; /* only valid for OP_SHARE */
pthread_mutex_t sm_lock; /* protects the remaining fields */
uint_t sm_total; /* number of filesystems to process */
uint_t sm_done; /* number of filesystems processed */
int sm_status; /* -1 if any of the share/mount operations failed */
} share_mount_state_t;
/*
* Share or mount a dataset.
*/
static int
share_mount_one(zfs_handle_t *zhp, int op, int flags, char *protocol,
boolean_t explicit, const char *options)
{
char mountpoint[ZFS_MAXPROPLEN];
char shareopts[ZFS_MAXPROPLEN];
char smbshareopts[ZFS_MAXPROPLEN];
const char *cmdname = op == OP_SHARE ? "share" : "mount";
struct mnttab mnt;
uint64_t zoned, canmount;
boolean_t shared_nfs, shared_smb;
assert(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM);
/*
* Check to make sure we can mount/share this dataset. If we
* are in the global zone and the filesystem is exported to a
* local zone, or if we are in a local zone and the
* filesystem is not exported, then it is an error.
*/
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
if (zoned && getzoneid() == GLOBAL_ZONEID) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"dataset is exported to a local zone\n"), cmdname,
zfs_get_name(zhp));
return (1);
} else if (!zoned && getzoneid() != GLOBAL_ZONEID) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"permission denied\n"), cmdname,
zfs_get_name(zhp));
return (1);
}
/*
* Ignore any filesystems which don't apply to us. This
* includes those with a legacy mountpoint, or those with
* legacy share options.
*/
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts,
sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshareopts,
sizeof (smbshareopts), NULL, NULL, 0, B_FALSE) == 0);
if (op == OP_SHARE && strcmp(shareopts, "off") == 0 &&
strcmp(smbshareopts, "off") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot share '%s': "
"legacy share\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use exports(5) or "
"smb.conf(5) to share this filesystem, or set "
"the sharenfs or sharesmb property\n"));
return (1);
}
/*
* We cannot share or mount legacy filesystems. If the
* shareopts is non-legacy but the mountpoint is legacy, we
* treat it as a legacy share.
*/
if (strcmp(mountpoint, "legacy") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"legacy mountpoint\n"), cmdname, zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use %s(8) to "
"%s this filesystem\n"), cmdname, cmdname);
return (1);
}
if (strcmp(mountpoint, "none") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': no "
"mountpoint set\n"), cmdname, zfs_get_name(zhp));
return (1);
}
/*
* canmount explicit outcome
* on no pass through
* on yes pass through
* off no return 0
* off yes display error, return 1
* noauto no return 0
* noauto yes pass through
*/
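/*
* For example, 'zfs mount -a' (explicit == B_FALSE) silently skips a
* canmount=noauto dataset, while an explicit 'zfs mount tank/opt' mounts it.
*/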
canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT);
if (canmount == ZFS_CANMOUNT_OFF) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"'canmount' property is set to 'off'\n"), cmdname,
zfs_get_name(zhp));
return (1);
} else if (canmount == ZFS_CANMOUNT_NOAUTO && !explicit) {
/*
* When performing a 'zfs mount -a', we skip any mounts for
* datasets that have 'noauto' set. Sharing a dataset with
* 'noauto' set is only allowed if it's mounted.
*/
if (op == OP_MOUNT)
return (0);
if (op == OP_SHARE && !zfs_is_mounted(zhp, NULL)) {
/* also purge it from existing exports */
zfs_unshareall_bypath(zhp, mountpoint);
return (0);
}
}
/*
* If this filesystem is encrypted and does not have
* a loaded key, we can not mount it.
*/
if ((flags & MS_CRYPT) == 0 &&
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF &&
zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"encryption key not loaded\n"), cmdname, zfs_get_name(zhp));
return (1);
}
/*
* If this filesystem is inconsistent and has a receive resume
* token, we can not mount it.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Contains partially-completed state from "
"\"zfs receive -s\", which can be resumed with "
"\"zfs send -t\"\n"),
cmdname, zfs_get_name(zhp));
return (1);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_REDACTED) && !(flags & MS_FORCE)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Dataset is not complete, was created by receiving "
"a redacted zfs send stream.\n"), cmdname,
zfs_get_name(zhp));
return (1);
}
/*
* At this point, we have verified that the mountpoint and/or
* shareopts are appropriate for auto management. If the
* filesystem is already mounted or shared, return (failing
* for explicit requests); otherwise mount or share the
* filesystem.
*/
switch (op) {
case OP_SHARE:
shared_nfs = zfs_is_shared_nfs(zhp, NULL);
shared_smb = zfs_is_shared_smb(zhp, NULL);
if ((shared_nfs && shared_smb) ||
(shared_nfs && strcmp(shareopts, "on") == 0 &&
strcmp(smbshareopts, "off") == 0) ||
(shared_smb && strcmp(smbshareopts, "on") == 0 &&
strcmp(shareopts, "off") == 0)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot share "
"'%s': filesystem already shared\n"),
zfs_get_name(zhp));
return (1);
}
if (!zfs_is_mounted(zhp, NULL) &&
zfs_mount(zhp, NULL, flags) != 0)
return (1);
if (protocol == NULL) {
if (zfs_shareall(zhp) != 0)
return (1);
} else if (strcmp(protocol, "nfs") == 0) {
if (zfs_share_nfs(zhp))
return (1);
} else if (strcmp(protocol, "smb") == 0) {
if (zfs_share_smb(zhp))
return (1);
} else {
(void) fprintf(stderr, gettext("cannot share "
"'%s': invalid share type '%s' "
"specified\n"),
zfs_get_name(zhp), protocol);
return (1);
}
break;
case OP_MOUNT:
if (options == NULL)
mnt.mnt_mntopts = "";
else
mnt.mnt_mntopts = (char *)options;
if (!hasmntopt(&mnt, MNTOPT_REMOUNT) &&
zfs_is_mounted(zhp, NULL)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot mount "
"'%s': filesystem already mounted\n"),
zfs_get_name(zhp));
return (1);
}
if (zfs_mount(zhp, options, flags) != 0)
return (1);
break;
}
return (0);
}
/*
* Reports progress in the form "(current/total)". Not thread-safe.
*/
static void
report_mount_progress(int current, int total)
{
static time_t last_progress_time = 0;
time_t now = time(NULL);
char info[32];
/* display header if we're here for the first time */
if (current == 1) {
set_progress_header(gettext("Mounting ZFS filesystems"));
} else if (current != total && last_progress_time + MOUNT_TIME >= now) {
/* too soon to report again */
return;
}
last_progress_time = now;
(void) sprintf(info, "(%d/%d)", current, total);
if (current == total)
finish_progress(info);
else
update_progress(info);
}
/*
* zfs_foreach_mountpoint() callback that mounts or shares one filesystem and
* updates the progress meter.
*/
static int
share_mount_one_cb(zfs_handle_t *zhp, void *arg)
{
share_mount_state_t *sms = arg;
int ret;
ret = share_mount_one(zhp, sms->sm_op, sms->sm_flags, sms->sm_proto,
B_FALSE, sms->sm_options);
pthread_mutex_lock(&sms->sm_lock);
if (ret != 0)
sms->sm_status = ret;
sms->sm_done++;
if (sms->sm_verbose)
report_mount_progress(sms->sm_done, sms->sm_total);
pthread_mutex_unlock(&sms->sm_lock);
return (ret);
}
static void
append_options(char *mntopts, char *newopts)
{
int len = strlen(mntopts);
/* original length plus new string to append plus 1 for the comma */
if (len + 1 + strlen(newopts) >= MNT_LINE_MAX) {
(void) fprintf(stderr, gettext("the opts argument for "
"'%s' option is too long (more than %d chars)\n"),
"-o", MNT_LINE_MAX);
usage(B_FALSE);
}
if (*mntopts)
mntopts[len++] = ',';
(void) strcpy(&mntopts[len], newopts);
}
static int
share_mount(int op, int argc, char **argv)
{
int do_all = 0;
boolean_t verbose = B_FALSE;
int c, ret = 0;
char *options = NULL;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, op == OP_MOUNT ? ":alvo:Of" : "al"))
!= -1) {
switch (c) {
case 'a':
do_all = 1;
break;
case 'v':
verbose = B_TRUE;
break;
case 'l':
flags |= MS_CRYPT;
break;
case 'o':
if (*optarg == '\0') {
(void) fprintf(stderr, gettext("empty mount "
"options (-o) specified\n"));
usage(B_FALSE);
}
if (options == NULL)
options = safe_malloc(MNT_LINE_MAX + 1);
/* option validation is done later */
append_options(options, optarg);
break;
case 'O':
flags |= MS_OVERLAY;
break;
case 'f':
flags |= MS_FORCE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (do_all) {
char *protocol = NULL;
if (op == OP_SHARE && argc > 0) {
if (strcmp(argv[0], "nfs") != 0 &&
strcmp(argv[0], "smb") != 0) {
(void) fprintf(stderr, gettext("share type "
"must be 'nfs' or 'smb'\n"));
usage(B_FALSE);
}
protocol = argv[0];
argc--;
argv++;
}
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
start_progress_timer();
get_all_cb_t cb = { 0 };
get_all_datasets(&cb, verbose);
if (cb.cb_used == 0) {
free(options);
return (0);
}
share_mount_state_t share_mount_state = { 0 };
share_mount_state.sm_op = op;
share_mount_state.sm_verbose = verbose;
share_mount_state.sm_flags = flags;
share_mount_state.sm_options = options;
share_mount_state.sm_proto = protocol;
share_mount_state.sm_total = cb.cb_used;
pthread_mutex_init(&share_mount_state.sm_lock, NULL);
/*
* libshare isn't mt-safe, so only do the operation in parallel
* if we're mounting. Additionally, the key-loading option must
* be serialized so that we can prompt the user for their keys
* in a consistent manner.
*/
zfs_foreach_mountpoint(g_zfs, cb.cb_handles, cb.cb_used,
share_mount_one_cb, &share_mount_state,
op == OP_MOUNT && !(flags & MS_CRYPT));
zfs_commit_all_shares();
ret = share_mount_state.sm_status;
for (int i = 0; i < cb.cb_used; i++)
zfs_close(cb.cb_handles[i]);
free(cb.cb_handles);
} else if (argc == 0) {
FILE *mnttab;
struct mnttab entry;
if ((op == OP_SHARE) || (options != NULL)) {
(void) fprintf(stderr, gettext("missing filesystem "
"argument (specify -a for all)\n"));
usage(B_FALSE);
}
/*
* When mount is given no arguments, go through
* /proc/self/mounts and display any active ZFS mounts.
* We hide any snapshots, since they are controlled
* automatically.
*/
if ((mnttab = fopen(MNTTAB, "re")) == NULL) {
free(options);
return (ENOENT);
}
while (getmntent(mnttab, &entry) == 0) {
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0 ||
strchr(entry.mnt_special, '@') != NULL)
continue;
(void) printf("%-30s %s\n", entry.mnt_special,
entry.mnt_mountp);
}
(void) fclose(mnttab);
} else {
zfs_handle_t *zhp;
if (argc > 1) {
(void) fprintf(stderr,
gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM)) == NULL) {
ret = 1;
} else {
ret = share_mount_one(zhp, op, flags, NULL, B_TRUE,
options);
zfs_commit_all_shares();
zfs_close(zhp);
}
}
free(options);
return (ret);
}
/*
* zfs mount -a
* zfs mount filesystem
*
* Mount all filesystems, or mount the given filesystem.
*/
static int
zfs_do_mount(int argc, char **argv)
{
return (share_mount(OP_MOUNT, argc, argv));
}
/*
* zfs share -a [nfs | smb]
* zfs share filesystem
*
* Share all filesystems, or share the given filesystem.
*/
static int
zfs_do_share(int argc, char **argv)
{
return (share_mount(OP_SHARE, argc, argv));
}
typedef struct unshare_unmount_node {
zfs_handle_t *un_zhp;
char *un_mountp;
uu_avl_node_t un_avlnode;
} unshare_unmount_node_t;
/* ARGSUSED */
static int
unshare_unmount_compare(const void *larg, const void *rarg, void *unused)
{
const unshare_unmount_node_t *l = larg;
const unshare_unmount_node_t *r = rarg;
return (strcmp(l->un_mountp, r->un_mountp));
}
/*
* Convenience routine used by zfs_do_umount() and manual_unmount(). Given an
* absolute path, find the entry in /proc/self/mounts, verify that it's a
* ZFS filesystem, and unmount it appropriately.
*/
static int
unshare_unmount_path(int op, char *path, int flags, boolean_t is_manual)
{
zfs_handle_t *zhp;
int ret = 0;
struct stat64 statbuf;
struct extmnttab entry;
const char *cmdname = (op == OP_SHARE) ? "unshare" : "unmount";
ino_t path_inode;
/*
* Search for the given (major,minor) pair in the mount table.
*/
if (getextmntent(path, &entry, &statbuf) != 0) {
if (op == OP_SHARE) {
(void) fprintf(stderr, gettext("cannot %s '%s': not "
"currently mounted\n"), cmdname, path);
return (1);
}
(void) fprintf(stderr, gettext("warning: %s not in "
"/proc/self/mounts\n"), path);
if ((ret = umount2(path, flags)) != 0)
(void) fprintf(stderr, gettext("%s: %s\n"), path,
strerror(errno));
return (ret != 0);
}
path_inode = statbuf.st_ino;
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': not a ZFS "
"filesystem\n"), cmdname, path);
return (1);
}
if ((zhp = zfs_open(g_zfs, entry.mnt_special,
ZFS_TYPE_FILESYSTEM)) == NULL)
return (1);
ret = 1;
if (stat64(entry.mnt_mountp, &statbuf) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': %s\n"),
cmdname, path, strerror(errno));
goto out;
} else if (statbuf.st_ino != path_inode) {
(void) fprintf(stderr, gettext("cannot "
"%s '%s': not a mountpoint\n"), cmdname, path);
goto out;
}
if (op == OP_SHARE) {
char nfs_mnt_prop[ZFS_MAXPROPLEN];
char smbshare_prop[ZFS_MAXPROPLEN];
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, nfs_mnt_prop,
sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshare_prop,
sizeof (smbshare_prop), NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0 &&
strcmp(smbshare_prop, "off") == 0) {
(void) fprintf(stderr, gettext("cannot unshare "
"'%s': legacy share\n"), path);
(void) fprintf(stderr, gettext("use exportfs(8) "
"or smbcontrol(1) to unshare this filesystem\n"));
} else if (!zfs_is_shared(zhp)) {
(void) fprintf(stderr, gettext("cannot unshare '%s': "
"not currently shared\n"), path);
} else {
ret = zfs_unshareall_bypath(zhp, path);
zfs_commit_all_shares();
}
} else {
char mtpt_prop[ZFS_MAXPROPLEN];
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mtpt_prop,
sizeof (mtpt_prop), NULL, NULL, 0, B_FALSE) == 0);
if (is_manual) {
ret = zfs_unmount(zhp, NULL, flags);
} else if (strcmp(mtpt_prop, "legacy") == 0) {
(void) fprintf(stderr, gettext("cannot unmount "
"'%s': legacy mountpoint\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use umount(8) "
"to unmount this filesystem\n"));
} else {
ret = zfs_unmountall(zhp, flags);
}
}
out:
zfs_close(zhp);
return (ret != 0);
}
/*
* Generic callback for unsharing or unmounting a filesystem.
*/
static int
unshare_unmount(int op, int argc, char **argv)
{
int do_all = 0;
int flags = 0;
int ret = 0;
int c;
zfs_handle_t *zhp;
char nfs_mnt_prop[ZFS_MAXPROPLEN];
char sharesmb[ZFS_MAXPROPLEN];
/* check options */
while ((c = getopt(argc, argv, op == OP_SHARE ? ":a" : "afu")) != -1) {
switch (c) {
case 'a':
do_all = 1;
break;
case 'f':
flags |= MS_FORCE;
break;
case 'u':
flags |= MS_CRYPT;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (do_all) {
/*
* We could make use of zfs_for_each() to walk all datasets in
* the system, but this would be very inefficient, especially
* since we would have to linearly search /proc/self/mounts for
* each one. Instead, do one pass through /proc/self/mounts
* looking for zfs entries and call zfs_unmount() for each one.
*
* Things get a little tricky if the administrator has created
* mountpoints beneath other ZFS filesystems. In this case, we
* have to unmount the deepest filesystems first. To accomplish
* this, we place all the mountpoints in an AVL tree sorted by
* mountpoint path, and walk the result in reverse so that child
* filesystems are always unmounted before their parents.
*/
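/*
 * Concrete illustration (mountpoints below are hypothetical): with
 * /tank, /tank/a and /tank/a/b in the tree, the reverse walk visits
 * /tank/a/b first and /tank last.
 */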
FILE *mnttab;
struct mnttab entry;
uu_avl_pool_t *pool;
uu_avl_t *tree = NULL;
unshare_unmount_node_t *node;
uu_avl_index_t idx;
uu_avl_walk_t *walk;
char *protocol = NULL;
if (op == OP_SHARE && argc > 0) {
if (strcmp(argv[0], "nfs") != 0 &&
strcmp(argv[0], "smb") != 0) {
(void) fprintf(stderr, gettext("share type "
"must be 'nfs' or 'smb'\n"));
usage(B_FALSE);
}
protocol = argv[0];
argc--;
argv++;
}
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (((pool = uu_avl_pool_create("unmount_pool",
sizeof (unshare_unmount_node_t),
offsetof(unshare_unmount_node_t, un_avlnode),
unshare_unmount_compare, UU_DEFAULT)) == NULL) ||
((tree = uu_avl_create(pool, NULL, UU_DEFAULT)) == NULL))
nomem();
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
while (getmntent(mnttab, &entry) == 0) {
/* ignore non-ZFS entries */
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
/* ignore snapshots */
if (strchr(entry.mnt_special, '@') != NULL)
continue;
if ((zhp = zfs_open(g_zfs, entry.mnt_special,
ZFS_TYPE_FILESYSTEM)) == NULL) {
ret = 1;
continue;
}
/*
* Ignore datasets that are excluded/restricted by
* parent pool name.
*/
if (zpool_skip_pool(zfs_get_pool_name(zhp))) {
zfs_close(zhp);
continue;
}
switch (op) {
case OP_SHARE:
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") != 0)
break;
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0)
continue;
break;
case OP_MOUNT:
/* Ignore legacy mounts */
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "legacy") == 0)
continue;
/* Ignore canmount=noauto mounts */
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) ==
ZFS_CANMOUNT_NOAUTO)
continue;
default:
break;
}
node = safe_malloc(sizeof (unshare_unmount_node_t));
node->un_zhp = zhp;
node->un_mountp = safe_strdup(entry.mnt_mountp);
uu_avl_node_init(node, &node->un_avlnode, pool);
if (uu_avl_find(tree, node, NULL, &idx) == NULL) {
uu_avl_insert(tree, node, idx);
} else {
zfs_close(node->un_zhp);
free(node->un_mountp);
free(node);
}
}
(void) fclose(mnttab);
/*
* Walk the AVL tree in reverse, unmounting each filesystem and
* removing it from the AVL tree in the process.
*/
if ((walk = uu_avl_walk_start(tree,
UU_WALK_REVERSE | UU_WALK_ROBUST)) == NULL)
nomem();
while ((node = uu_avl_walk_next(walk)) != NULL) {
const char *mntarg = NULL;
uu_avl_remove(tree, node);
switch (op) {
case OP_SHARE:
if (zfs_unshareall_bytype(node->un_zhp,
node->un_mountp, protocol) != 0)
ret = 1;
break;
case OP_MOUNT:
if (zfs_unmount(node->un_zhp,
mntarg, flags) != 0)
ret = 1;
break;
}
zfs_close(node->un_zhp);
free(node->un_mountp);
free(node);
}
if (op == OP_SHARE)
zfs_commit_shares(protocol);
uu_avl_walk_end(walk);
uu_avl_destroy(tree);
uu_avl_pool_destroy(pool);
} else {
if (argc != 1) {
if (argc == 0)
(void) fprintf(stderr,
gettext("missing filesystem argument\n"));
else
(void) fprintf(stderr,
gettext("too many arguments\n"));
usage(B_FALSE);
}
/*
* We have an argument, but it may be a full path or a ZFS
* filesystem. Pass full paths off to unshare_unmount_path() (shared by
* manual_unmount), otherwise open the filesystem and pass to
* zfs_unmount().
*/
if (argv[0][0] == '/')
return (unshare_unmount_path(op, argv[0],
flags, B_FALSE));
if ((zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM)) == NULL)
return (1);
verify(zfs_prop_get(zhp, op == OP_SHARE ?
ZFS_PROP_SHARENFS : ZFS_PROP_MOUNTPOINT,
nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL,
NULL, 0, B_FALSE) == 0);
switch (op) {
case OP_SHARE:
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
sharesmb, sizeof (sharesmb), NULL, NULL,
0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0 &&
strcmp(sharesmb, "off") == 0) {
(void) fprintf(stderr, gettext("cannot "
"unshare '%s': legacy share\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
"exports(5) or smb.conf(5) to unshare "
"this filesystem\n"));
ret = 1;
} else if (!zfs_is_shared(zhp)) {
(void) fprintf(stderr, gettext("cannot "
"unshare '%s': not currently "
"shared\n"), zfs_get_name(zhp));
ret = 1;
} else if (zfs_unshareall(zhp) != 0) {
ret = 1;
}
break;
case OP_MOUNT:
if (strcmp(nfs_mnt_prop, "legacy") == 0) {
(void) fprintf(stderr, gettext("cannot "
"unmount '%s': legacy "
"mountpoint\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
"umount(8) to unmount this "
"filesystem\n"));
ret = 1;
} else if (!zfs_is_mounted(zhp, NULL)) {
(void) fprintf(stderr, gettext("cannot "
"unmount '%s': not currently "
"mounted\n"),
zfs_get_name(zhp));
ret = 1;
} else if (zfs_unmountall(zhp, flags) != 0) {
ret = 1;
}
break;
}
zfs_close(zhp);
}
return (ret);
}
/*
* zfs unmount [-fu] -a
* zfs unmount [-fu] filesystem
*
* Unmount all filesystems, or a specific ZFS filesystem.
*/
static int
zfs_do_unmount(int argc, char **argv)
{
return (unshare_unmount(OP_MOUNT, argc, argv));
}
/*
* zfs unshare -a
* zfs unshare filesystem
*
* Unshare all filesystems, or a specific ZFS filesystem.
*/
static int
zfs_do_unshare(int argc, char **argv)
{
return (unshare_unmount(OP_SHARE, argc, argv));
}
static int
find_command_idx(char *command, int *idx)
{
int i;
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
static int
zfs_do_diff(int argc, char **argv)
{
zfs_handle_t *zhp;
int flags = 0;
char *tosnap = NULL;
char *fromsnap = NULL;
char *atp, *copy;
int err = 0;
int c;
struct sigaction sa;
while ((c = getopt(argc, argv, "FHt")) != -1) {
switch (c) {
case 'F':
flags |= ZFS_DIFF_CLASSIFY;
break;
case 'H':
flags |= ZFS_DIFF_PARSEABLE;
break;
case 't':
flags |= ZFS_DIFF_TIMESTAMP;
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr,
gettext("must provide at least one snapshot name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
fromsnap = argv[0];
tosnap = (argc == 2) ? argv[1] : NULL;
copy = NULL;
if (*fromsnap != '@')
copy = strdup(fromsnap);
else if (tosnap)
copy = strdup(tosnap);
if (copy == NULL)
usage(B_FALSE);
if ((atp = strchr(copy, '@')) != NULL)
*atp = '\0';
if ((zhp = zfs_open(g_zfs, copy, ZFS_TYPE_FILESYSTEM)) == NULL) {
free(copy);
return (1);
}
free(copy);
/*
* Ignore SIGPIPE so that the library can give us
* information on any failure
*/
if (sigemptyset(&sa.sa_mask) == -1) {
err = errno;
goto out;
}
sa.sa_flags = 0;
sa.sa_handler = SIG_IGN;
if (sigaction(SIGPIPE, &sa, NULL) == -1) {
err = errno;
goto out;
}
err = zfs_show_diffs(zhp, STDOUT_FILENO, fromsnap, tosnap, flags);
out:
zfs_close(zhp);
return (err != 0);
}
/*
* zfs bookmark <fs@source>|<fs#source> <fs#bookmark>
*
* Creates a bookmark with the given name from the source snapshot
* or creates a copy of an existing source bookmark.
*/
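/*
 * Illustrative usage only (dataset and bookmark names are hypothetical):
 *
 *	zfs bookmark rpool/data@snap1 rpool/data#bm1
 *	zfs send -i rpool/data#bm1 rpool/data@snap2
 *
 * The bookmark preserves the point-in-time reference needed for an
 * incremental send even after the original snapshot is destroyed.
 */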
static int
zfs_do_bookmark(int argc, char **argv)
{
char *source, *bookname;
char expbuf[ZFS_MAX_DATASET_NAME_LEN];
int source_type;
nvlist_t *nvl;
int ret = 0;
int c;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source argument\n"));
goto usage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing bookmark argument\n"));
goto usage;
}
source = argv[0];
bookname = argv[1];
if (strchr(source, '@') == NULL && strchr(source, '#') == NULL) {
(void) fprintf(stderr,
gettext("invalid source name '%s': "
"must contain a '@' or '#'\n"), source);
goto usage;
}
if (strchr(bookname, '#') == NULL) {
(void) fprintf(stderr,
gettext("invalid bookmark name '%s': "
"must contain a '#'\n"), bookname);
goto usage;
}
/*
* expand source or bookname to full path:
* one of them may be specified as short name
*/
{
char **expand;
char *source_short, *bookname_short;
source_short = strpbrk(source, "@#");
bookname_short = strpbrk(bookname, "#");
if (source_short == source &&
bookname_short == bookname) {
(void) fprintf(stderr, gettext(
"either source or bookmark must be specified as "
"full dataset paths\n"));
goto usage;
} else if (source_short != source &&
bookname_short != bookname) {
expand = NULL;
} else if (source_short != source) {
strlcpy(expbuf, source, sizeof (expbuf));
expand = &bookname;
} else if (bookname_short != bookname) {
strlcpy(expbuf, bookname, sizeof (expbuf));
expand = &source;
} else {
abort();
}
if (expand != NULL) {
*strpbrk(expbuf, "@#") = '\0'; /* dataset name in buf */
(void) strlcat(expbuf, *expand, sizeof (expbuf));
*expand = expbuf;
}
}
/* determine source type */
switch (*strpbrk(source, "@#")) {
case '@': source_type = ZFS_TYPE_SNAPSHOT; break;
case '#': source_type = ZFS_TYPE_BOOKMARK; break;
default: abort();
}
/* test the source exists */
zfs_handle_t *zhp;
zhp = zfs_open(g_zfs, source, source_type);
if (zhp == NULL)
goto usage;
zfs_close(zhp);
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, bookname, source);
ret = lzc_bookmark(nvl, NULL);
fnvlist_free(nvl);
if (ret != 0) {
const char *err_msg = NULL;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create bookmark '%s'"), bookname);
switch (ret) {
case EXDEV:
err_msg = "bookmark is in a different pool";
break;
case ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR:
err_msg = "source is not an ancestor of the "
"new bookmark's dataset";
break;
case EEXIST:
err_msg = "bookmark exists";
break;
case EINVAL:
err_msg = "invalid argument";
break;
case ENOTSUP:
err_msg = "bookmark feature not enabled";
break;
case ENOSPC:
err_msg = "out of space";
break;
case ENOENT:
err_msg = "dataset does not exist";
break;
default:
(void) zfs_standard_error(g_zfs, ret, errbuf);
break;
}
if (err_msg != NULL) {
(void) fprintf(stderr, "%s: %s\n", errbuf,
dgettext(TEXT_DOMAIN, err_msg));
}
}
return (ret != 0);
usage:
usage(B_FALSE);
return (-1);
}
static int
zfs_do_channel_program(int argc, char **argv)
{
int ret, fd, c;
char *progbuf, *filename, *poolname;
size_t progsize, progread;
nvlist_t *outnvl = NULL;
uint64_t instrlimit = ZCP_DEFAULT_INSTRLIMIT;
uint64_t memlimit = ZCP_DEFAULT_MEMLIMIT;
boolean_t sync_flag = B_TRUE, json_output = B_FALSE;
zpool_handle_t *zhp;
/* check options */
while ((c = getopt(argc, argv, "nt:m:j")) != -1) {
switch (c) {
case 't':
case 'm': {
uint64_t arg;
char *endp;
errno = 0;
arg = strtoull(optarg, &endp, 0);
if (errno != 0 || *endp != '\0') {
(void) fprintf(stderr, gettext(
"invalid argument "
"'%s': expected integer\n"), optarg);
goto usage;
}
if (c == 't') {
instrlimit = arg;
} else {
ASSERT3U(c, ==, 'm');
memlimit = arg;
}
break;
}
case 'n': {
sync_flag = B_FALSE;
break;
}
case 'j': {
json_output = B_TRUE;
break;
}
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
if (argc < 2) {
(void) fprintf(stderr,
gettext("invalid number of arguments\n"));
goto usage;
}
poolname = argv[0];
filename = argv[1];
if (strcmp(filename, "-") == 0) {
fd = 0;
filename = "standard input";
} else if ((fd = open(filename, O_RDONLY)) < 0) {
(void) fprintf(stderr, gettext("cannot open '%s': %s\n"),
filename, strerror(errno));
return (1);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
(void) fprintf(stderr, gettext("cannot open pool '%s'\n"),
poolname);
if (fd != 0)
(void) close(fd);
return (1);
}
zpool_close(zhp);
/*
* Read in the channel program, expanding the program buffer as
* necessary.
*/
progread = 0;
progsize = 1024;
progbuf = safe_malloc(progsize);
do {
ret = read(fd, progbuf + progread, progsize - progread);
progread += ret;
if (progread == progsize && ret > 0) {
progsize *= 2;
progbuf = safe_realloc(progbuf, progsize);
}
} while (ret > 0);
if (fd != 0)
(void) close(fd);
if (ret < 0) {
free(progbuf);
(void) fprintf(stderr,
gettext("cannot read '%s': %s\n"),
filename, strerror(errno));
return (1);
}
progbuf[progread] = '\0';
/*
* Any remaining arguments are passed as arguments to the lua script as
* a string array:
* {
* "argv" -> [ "arg 1", ... "arg n" ],
* }
*/
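/*
 * Illustrative invocation only (pool, script path and snapshot name
 * are hypothetical):
 *
 *	zfs program tank /root/destroy_snap.lua tank/fs@old
 *
 * Here "tank/fs@old" is delivered to the Lua script through the
 * "argv" array built below.
 */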
nvlist_t *argnvl = fnvlist_alloc();
fnvlist_add_string_array(argnvl, ZCP_ARG_CLIARGV, argv + 2, argc - 2);
if (sync_flag) {
ret = lzc_channel_program(poolname, progbuf,
instrlimit, memlimit, argnvl, &outnvl);
} else {
ret = lzc_channel_program_nosync(poolname, progbuf,
instrlimit, memlimit, argnvl, &outnvl);
}
if (ret != 0) {
/*
* On error, report the error message handed back by lua if one
* exists. Otherwise, generate an appropriate error message,
* falling back on strerror() for an unexpected return code.
*/
char *errstring = NULL;
const char *msg = gettext("Channel program execution failed");
uint64_t instructions = 0;
if (outnvl != NULL && nvlist_exists(outnvl, ZCP_RET_ERROR)) {
(void) nvlist_lookup_string(outnvl,
ZCP_RET_ERROR, &errstring);
if (errstring == NULL)
errstring = strerror(ret);
if (ret == ETIME) {
(void) nvlist_lookup_uint64(outnvl,
ZCP_ARG_INSTRLIMIT, &instructions);
}
} else {
switch (ret) {
case EINVAL:
errstring =
"Invalid instruction or memory limit.";
break;
case ENOMEM:
errstring = "Return value too large.";
break;
case ENOSPC:
errstring = "Memory limit exhausted.";
break;
case ETIME:
errstring = "Timed out.";
break;
case EPERM:
errstring = "Permission denied. Channel "
"programs must be run as root.";
break;
default:
(void) zfs_standard_error(g_zfs, ret, msg);
}
}
if (errstring != NULL)
(void) fprintf(stderr, "%s:\n%s\n", msg, errstring);
if (ret == ETIME && instructions != 0)
(void) fprintf(stderr,
gettext("%llu Lua instructions\n"),
(u_longlong_t)instructions);
} else {
if (json_output) {
(void) nvlist_print_json(stdout, outnvl);
} else if (nvlist_empty(outnvl)) {
(void) fprintf(stdout, gettext("Channel program fully "
"executed and did not produce output.\n"));
} else {
(void) fprintf(stdout, gettext("Channel program fully "
"executed and produced output:\n"));
dump_nvlist(outnvl, 4);
}
}
free(progbuf);
fnvlist_free(outnvl);
fnvlist_free(argnvl);
return (ret != 0);
usage:
usage(B_FALSE);
return (-1);
}
typedef struct loadkey_cbdata {
boolean_t cb_loadkey;
boolean_t cb_recursive;
boolean_t cb_noop;
char *cb_keylocation;
uint64_t cb_numfailed;
uint64_t cb_numattempted;
} loadkey_cbdata_t;
static int
load_key_callback(zfs_handle_t *zhp, void *data)
{
int ret;
boolean_t is_encroot;
loadkey_cbdata_t *cb = data;
uint64_t keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
/*
* If we are working recursively, we want to skip loading / unloading
* keys for non-encryption roots and datasets whose keys are already
* in the desired end-state.
*/
if (cb->cb_recursive) {
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
if (ret != 0)
return (ret);
if (!is_encroot)
return (0);
if ((cb->cb_loadkey && keystatus == ZFS_KEYSTATUS_AVAILABLE) ||
(!cb->cb_loadkey && keystatus == ZFS_KEYSTATUS_UNAVAILABLE))
return (0);
}
cb->cb_numattempted++;
if (cb->cb_loadkey)
ret = zfs_crypto_load_key(zhp, cb->cb_noop, cb->cb_keylocation);
else
ret = zfs_crypto_unload_key(zhp);
if (ret != 0) {
cb->cb_numfailed++;
return (ret);
}
return (0);
}
static int
load_unload_keys(int argc, char **argv, boolean_t loadkey)
{
int c, ret = 0, flags = 0;
boolean_t do_all = B_FALSE;
loadkey_cbdata_t cb = { 0 };
cb.cb_loadkey = loadkey;
while ((c = getopt(argc, argv, "anrL:")) != -1) {
/* noop and alternate keylocations only apply to zfs load-key */
if (loadkey) {
switch (c) {
case 'n':
cb.cb_noop = B_TRUE;
continue;
case 'L':
cb.cb_keylocation = optarg;
continue;
default:
break;
}
}
switch (c) {
case 'a':
do_all = B_TRUE;
cb.cb_recursive = B_TRUE;
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
cb.cb_recursive = B_TRUE;
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (!do_all && argc == 0) {
(void) fprintf(stderr,
gettext("Missing dataset argument or -a option\n"));
usage(B_FALSE);
}
if (do_all && argc != 0) {
(void) fprintf(stderr,
gettext("Cannot specify dataset with -a option\n"));
usage(B_FALSE);
}
if (cb.cb_recursive && cb.cb_keylocation != NULL &&
strcmp(cb.cb_keylocation, "prompt") != 0) {
(void) fprintf(stderr, gettext("alternate keylocation may only "
"be 'prompt' with -r or -a\n"));
usage(B_FALSE);
}
ret = zfs_for_each(argc, argv, flags,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, NULL, NULL, 0,
load_key_callback, &cb);
if (cb.cb_noop || (cb.cb_recursive && cb.cb_numattempted != 0)) {
(void) printf(gettext("%llu / %llu key(s) successfully %s\n"),
(u_longlong_t)(cb.cb_numattempted - cb.cb_numfailed),
(u_longlong_t)cb.cb_numattempted,
loadkey ? (cb.cb_noop ? "verified" : "loaded") :
"unloaded");
}
if (cb.cb_numfailed != 0)
ret = -1;
return (ret);
}
static int
zfs_do_load_key(int argc, char **argv)
{
return (load_unload_keys(argc, argv, B_TRUE));
}
static int
zfs_do_unload_key(int argc, char **argv)
{
return (load_unload_keys(argc, argv, B_FALSE));
}
static int
zfs_do_change_key(int argc, char **argv)
{
int c, ret;
uint64_t keystatus;
boolean_t loadkey = B_FALSE, inheritkey = B_FALSE;
zfs_handle_t *zhp = NULL;
nvlist_t *props = fnvlist_alloc();
while ((c = getopt(argc, argv, "lio:")) != -1) {
switch (c) {
case 'l':
loadkey = B_TRUE;
break;
case 'i':
inheritkey = B_TRUE;
break;
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
return (1);
}
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
if (inheritkey && !nvlist_empty(props)) {
(void) fprintf(stderr,
gettext("Properties not allowed for inheriting\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("Too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[argc - 1],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
usage(B_FALSE);
if (loadkey) {
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus != ZFS_KEYSTATUS_AVAILABLE) {
ret = zfs_crypto_load_key(zhp, B_FALSE, NULL);
if (ret != 0) {
nvlist_free(props);
zfs_close(zhp);
return (-1);
}
}
/* refresh the properties so the new keystatus is visible */
zfs_refresh_properties(zhp);
}
ret = zfs_crypto_rewrap(zhp, props, inheritkey);
if (ret != 0) {
nvlist_free(props);
zfs_close(zhp);
return (-1);
}
nvlist_free(props);
zfs_close(zhp);
return (0);
}
/*
* 1) zfs project [-d|-r] <file|directory ...>
* List project ID and inherit flag of file(s) or directories.
* -d: List the directory itself, not its children.
* -r: List subdirectories recursively.
*
* 2) zfs project -C [-k] [-r] <file|directory ...>
* Clear project inherit flag and/or ID on the file(s) or directories.
* -k: Keep the project ID unchanged. If not specified, the project ID
* will be reset to zero.
* -r: Clear on subdirectories recursively.
*
* 3) zfs project -c [-0] [-d|-r] [-p id] <file|directory ...>
* Check project ID and inherit flag on the file(s) or directories,
* report the outliers.
* -0: Print file name followed by a NUL instead of newline.
* -d: Check the directory itself, not its children.
* -p: Specify the reference ID to compare against the project IDs of
* the target file(s) or directories. If not specified, the project
* ID of the target (top) directory is used as the reference.
* -r: Check subdirectories recursively.
*
* 4) zfs project [-p id] [-r] [-s] <file|directory ...>
* Set project ID and/or inherit flag on the file(s) or directories.
* -p: Set the project ID as the given id.
* -r: Set on subdirectories recursively. If the "-p" option is not
* specified, the top-level directory's project ID is used as the
* given ID, and both the project ID and the inherit flag are set
* on all descendants of the top-level directory.
* -s: Set project inherit flag.
*/
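/*
 * Illustrative examples only (paths and project IDs are hypothetical):
 *
 *	zfs project -s -p 1001 -r /tank/proj	set ID 1001 and inherit flag
 *	zfs project -c -p 1001 -r /tank/proj	report entries that differ
 *	zfs project -C -k -r /tank/proj		clear inherit flag, keep IDs
 */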
static int
zfs_do_project(int argc, char **argv)
{
zfs_project_control_t zpc = {
.zpc_expected_projid = ZFS_INVALID_PROJID,
.zpc_op = ZFS_PROJECT_OP_DEFAULT,
.zpc_dironly = B_FALSE,
.zpc_keep_projid = B_FALSE,
.zpc_newline = B_TRUE,
.zpc_recursive = B_FALSE,
.zpc_set_flag = B_FALSE,
};
int ret = 0, c;
if (argc < 2)
usage(B_FALSE);
while ((c = getopt(argc, argv, "0Ccdkp:rs")) != -1) {
switch (c) {
case '0':
zpc.zpc_newline = B_FALSE;
break;
case 'C':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_op = ZFS_PROJECT_OP_CLEAR;
break;
case 'c':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_op = ZFS_PROJECT_OP_CHECK;
break;
case 'd':
zpc.zpc_dironly = B_TRUE;
/* overwrite "-r" option */
zpc.zpc_recursive = B_FALSE;
break;
case 'k':
zpc.zpc_keep_projid = B_TRUE;
break;
case 'p': {
char *endptr;
errno = 0;
zpc.zpc_expected_projid = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid project ID '%s'\n"),
optarg);
usage(B_FALSE);
}
if (zpc.zpc_expected_projid >= UINT32_MAX) {
(void) fprintf(stderr,
gettext("project ID must be less than "
"%u\n"), UINT32_MAX);
usage(B_FALSE);
}
break;
}
case 'r':
zpc.zpc_recursive = B_TRUE;
/* overwrite "-d" option */
zpc.zpc_dironly = B_FALSE;
break;
case 's':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_set_flag = B_TRUE;
zpc.zpc_op = ZFS_PROJECT_OP_SET;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (zpc.zpc_op == ZFS_PROJECT_OP_DEFAULT) {
if (zpc.zpc_expected_projid != ZFS_INVALID_PROJID)
zpc.zpc_op = ZFS_PROJECT_OP_SET;
else
zpc.zpc_op = ZFS_PROJECT_OP_LIST;
}
switch (zpc.zpc_op) {
case ZFS_PROJECT_OP_LIST:
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_CHECK:
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_CLEAR:
if (zpc.zpc_dironly) {
(void) fprintf(stderr,
gettext("'-d' is useless together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
if (zpc.zpc_expected_projid != ZFS_INVALID_PROJID) {
(void) fprintf(stderr,
gettext("'-p' is useless together with '-C'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_SET:
if (zpc.zpc_dironly) {
(void) fprintf(stderr,
gettext("'-d' is useless when setting the project ID and/or "
"inherit flag\n"));
usage(B_FALSE);
}
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
break;
default:
ASSERT(0);
break;
}
argv += optind;
argc -= optind;
if (argc == 0) {
(void) fprintf(stderr,
gettext("missing file or directory target(s)\n"));
usage(B_FALSE);
}
for (int i = 0; i < argc; i++) {
int err;
err = zfs_project_handle(argv[i], &zpc);
if (err && !ret)
ret = err;
}
return (ret);
}
static int
zfs_do_wait(int argc, char **argv)
{
boolean_t enabled[ZFS_WAIT_NUM_ACTIVITIES];
int error, i;
int c;
/* By default, wait for all types of activity. */
for (i = 0; i < ZFS_WAIT_NUM_ACTIVITIES; i++)
enabled[i] = B_TRUE;
while ((c = getopt(argc, argv, "t:")) != -1) {
switch (c) {
case 't':
{
static char *col_subopts[] = { "deleteq", NULL };
char *value;
/* Reset activities array */
bzero(&enabled, sizeof (enabled));
while (*optarg != '\0') {
int activity = getsubopt(&optarg, col_subopts,
&value);
if (activity < 0) {
(void) fprintf(stderr,
gettext("invalid activity '%s'\n"),
value);
usage(B_FALSE);
}
enabled[activity] = B_TRUE;
}
break;
}
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argv += optind;
argc -= optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing 'filesystem' "
"argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zfs_handle_t *zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (1);
for (;;) {
boolean_t missing = B_FALSE;
boolean_t any_waited = B_FALSE;
for (int i = 0; i < ZFS_WAIT_NUM_ACTIVITIES; i++) {
boolean_t waited;
if (!enabled[i])
continue;
error = zfs_wait_status(zhp, i, &missing, &waited);
if (error != 0 || missing)
break;
any_waited = (any_waited || waited);
}
if (error != 0 || missing || !any_waited)
break;
}
zfs_close(zhp);
return (error);
}
/*
* Display version message
*/
static int
zfs_do_version(int argc, char **argv)
{
if (zfs_version_print() == -1)
return (1);
return (0);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* The 'umount' command is an alias for 'unmount'
*/
if (strcmp(cmdname, "umount") == 0)
cmdname = "unmount";
/*
* The 'recv' command is an alias for 'receive'
*/
if (strcmp(cmdname, "recv") == 0)
cmdname = "receive";
/*
* The 'snap' command is an alias for 'snapshot'
*/
if (strcmp(cmdname, "snap") == 0)
cmdname = "snapshot";
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) ||
(strcmp(cmdname, "--help") == 0))
usage(B_TRUE);
/*
* Special case '-V|--version'
*/
if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
return (zfs_do_version(argc, argv));
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (1);
}
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
libzfs_print_on_error(g_zfs, B_TRUE);
/*
* Many commands modify input strings for string parsing reasons.
* We create a copy to protect the original argv.
*/
newargv = malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
libzfs_mnttab_cache(g_zfs, B_TRUE);
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
} else if (strchr(cmdname, '=') != NULL) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
#ifdef __FreeBSD__
#include <sys/jail.h>
#include <jail.h>
/*
* Attach/detach the given dataset to/from the given jail
*/
/* ARGSUSED */
static int
zfs_do_jail_impl(int argc, char **argv, boolean_t attach)
{
zfs_handle_t *zhp;
int jailid, ret;
/* check number of arguments */
if (argc < 3) {
(void) fprintf(stderr, gettext("missing argument(s)\n"));
usage(B_FALSE);
}
if (argc > 3) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
jailid = jail_getid(argv[1]);
if (jailid < 0) {
(void) fprintf(stderr, gettext("invalid jail id or name\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[2], ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (1);
ret = (zfs_jail(zhp, jailid, attach) != 0);
zfs_close(zhp);
return (ret);
}
/*
* zfs jail jailid filesystem
*
* Attach the given dataset to the given jail
*/
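/*
 * Illustrative example only (jail and dataset names are hypothetical):
 *
 *	zfs jail myjail tank/jailed
 *
 * For the dataset to be administered from inside the jail it typically
 * also needs the 'jailed' property set to 'on'.
 */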
/* ARGSUSED */
static int
zfs_do_jail(int argc, char **argv)
{
return (zfs_do_jail_impl(argc, argv, B_TRUE));
}
/*
* zfs unjail jailid filesystem
*
* Detach the given dataset from the given jail
*/
/* ARGSUSED */
static int
zfs_do_unjail(int argc, char **argv)
{
return (zfs_do_jail_impl(argc, argv, B_FALSE));
}
#endif
diff --git a/sys/contrib/openzfs/cmd/zgenhostid/zgenhostid.c b/sys/contrib/openzfs/cmd/zgenhostid/zgenhostid.c
index 4a4ca80e00e0..853931c6ad6e 100644
--- a/sys/contrib/openzfs/cmd/zgenhostid/zgenhostid.c
+++ b/sys/contrib/openzfs/cmd/zgenhostid/zgenhostid.c
@@ -1,142 +1,141 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2020, Georgy Yakovlev. All rights reserved.
*/
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>
-static void
+static __attribute__((noreturn)) void
usage(void)
{
(void) fprintf(stderr,
"usage: zgenhostid [-fh] [-o path] [value]\n\n"
" -f\t\t force hostid file write\n"
" -h\t\t print this usage and exit\n"
" -o <filename>\t write hostid to this file\n\n"
"If hostid file is not present, store a hostid in it.\n"
"The optional value should be an 8-digit hex number between"
" 1 and 2^32-1.\n"
"If the value is 0 or no value is provided, a random one"
" will be generated.\n"
"The value must be unique among your systems.\n");
exit(EXIT_FAILURE);
- /* NOTREACHED */
}
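/*
 * Illustrative invocations only (the hostid value and output path are
 * arbitrary examples):
 *
 *	zgenhostid			store a random hostid in /etc/hostid
 *	zgenhostid -o /tmp/hostid 0xdeadbeef
 */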
int
main(int argc, char **argv)
{
/* default file path, can be optionally set by user */
const char *path = "/etc/hostid";
/* holds converted user input or lrand48() generated value */
unsigned long input_i = 0;
int opt;
int force_fwrite = 0;
while ((opt = getopt_long(argc, argv, "fo:h?", 0, 0)) != -1) {
switch (opt) {
case 'f':
force_fwrite = 1;
break;
case 'o':
path = optarg;
break;
case 'h':
case '?':
usage();
}
}
char *in_s = argv[optind];
if (in_s != NULL) {
/* increment pointer by 2 if string is 0x prefixed */
if (strncasecmp("0x", in_s, 2) == 0) {
in_s += 2;
}
/* must be exactly 8 hex digits */
const char *hex = "0123456789abcdefABCDEF";
if (strlen(in_s) != 8 || strspn(in_s, hex) != 8) {
fprintf(stderr, "%s\n", strerror(ERANGE));
usage();
}
input_i = strtoul(in_s, NULL, 16);
if (errno != 0) {
perror("strtoul");
exit(EXIT_FAILURE);
}
if (input_i > UINT32_MAX) {
fprintf(stderr, "%s\n", strerror(ERANGE));
usage();
}
}
struct stat fstat;
if (force_fwrite == 0 && stat(path, &fstat) == 0 &&
S_ISREG(fstat.st_mode)) {
fprintf(stderr, "%s: %s\n", path, strerror(EEXIST));
exit(EXIT_FAILURE);
}
/*
* generate if not provided by user
* also handle unlikely zero return from lrand48()
*/
while (input_i == 0) {
srand48(getpid() ^ time(NULL));
input_i = lrand48();
}
FILE *fp = fopen(path, "wb");
if (!fp) {
perror("fopen");
exit(EXIT_FAILURE);
}
/*
* we need just 4 bytes in native endianness
* not using sethostid() because it may be missing or just a stub
*/
uint32_t hostid = input_i;
int written = fwrite(&hostid, 1, 4, fp);
if (written != 4) {
perror("fwrite");
exit(EXIT_FAILURE);
}
fclose(fp);
exit(EXIT_SUCCESS);
}
diff --git a/sys/contrib/openzfs/cmd/zhack/zhack.c b/sys/contrib/openzfs/cmd/zhack/zhack.c
index c1017ec2386c..b27423f538e7 100644
--- a/sys/contrib/openzfs/cmd/zhack/zhack.c
+++ b/sys/contrib/openzfs/cmd/zhack/zhack.c
@@ -1,532 +1,533 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
*/
/*
* zhack is a debugging tool that can write changes to ZFS pool using libzpool
* for testing purposes. Altering pools with zhack is unsupported and may
* result in corrupted pools.
*/
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/dsl_synctask.h>
#include <sys/vdev.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_pool.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfeature.h>
#include <sys/dmu_tx.h>
#include <zfeature_common.h>
#include <libzutil.h>
const char cmdname[] = "zhack";
static importargs_t g_importargs;
static char *g_pool;
static boolean_t g_readonly;
-static void
+static __attribute__((noreturn)) void
usage(void)
{
(void) fprintf(stderr,
"Usage: %s [-c cachefile] [-d dir] <subcommand> <args> ...\n"
"where <subcommand> <args> is one of the following:\n"
"\n", cmdname);
(void) fprintf(stderr,
" feature stat <pool>\n"
" print information about enabled features\n"
" feature enable [-r] [-d desc] <pool> <feature>\n"
" add a new enabled feature to the pool\n"
" -d <desc> sets the feature's description\n"
" -r set read-only compatible flag for feature\n"
" feature ref [-md] <pool> <feature>\n"
" change the refcount on the given feature\n"
" -d decrease instead of increase the refcount\n"
" -m add the feature to the label if increasing refcount\n"
"\n"
" <feature> : should be a feature guid\n");
exit(1);
}
-static void
+static __attribute__((noreturn)) __attribute__((format(printf, 3, 4))) void
fatal(spa_t *spa, void *tag, const char *fmt, ...)
{
va_list ap;
if (spa != NULL) {
spa_close(spa, tag);
(void) spa_export(g_pool, NULL, B_TRUE, B_FALSE);
}
va_start(ap, fmt);
(void) fprintf(stderr, "%s: ", cmdname);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
(void) fprintf(stderr, "\n");
exit(1);
}
/* ARGSUSED */
static int
space_delta_cb(dmu_object_type_t bonustype, const void *data,
zfs_file_info_t *zoi)
{
/*
* Is it a valid type of object to track?
*/
if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
return (ENOENT);
(void) fprintf(stderr, "modifying object that needs user accounting\n");
abort();
- /* NOTREACHED */
}
/*
* Target is the dataset whose pool we want to open.
*/
static void
zhack_import(char *target, boolean_t readonly)
{
nvlist_t *config;
nvlist_t *props;
int error;
kernel_init(readonly ? SPA_MODE_READ :
(SPA_MODE_READ | SPA_MODE_WRITE));
dmu_objset_register_type(DMU_OST_ZFS, space_delta_cb);
g_readonly = readonly;
g_importargs.can_be_active = readonly;
g_pool = strdup(target);
error = zpool_find_config(NULL, target, &config, &g_importargs,
&libzpool_config_ops);
if (error)
fatal(NULL, FTAG, "cannot import '%s'", target);
props = NULL;
if (readonly) {
VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
VERIFY(nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), 1) == 0);
}
zfeature_checks_disable = B_TRUE;
error = spa_import(target, config, props,
(readonly ? ZFS_IMPORT_SKIP_MMP : ZFS_IMPORT_NORMAL));
fnvlist_free(config);
zfeature_checks_disable = B_FALSE;
if (error == EEXIST)
error = 0;
if (error)
fatal(NULL, FTAG, "can't import '%s': %s", target,
strerror(error));
}
static void
zhack_spa_open(char *target, boolean_t readonly, void *tag, spa_t **spa)
{
int err;
zhack_import(target, readonly);
zfeature_checks_disable = B_TRUE;
err = spa_open(target, spa, tag);
zfeature_checks_disable = B_FALSE;
if (err != 0)
fatal(*spa, FTAG, "cannot open '%s': %s", target,
strerror(err));
if (spa_version(*spa) < SPA_VERSION_FEATURES) {
fatal(*spa, FTAG, "'%s' has version %d, features not enabled",
target, (int)spa_version(*spa));
}
}
static void
dump_obj(objset_t *os, uint64_t obj, const char *name)
{
zap_cursor_t zc;
zap_attribute_t za;
(void) printf("%s_obj:\n", name);
for (zap_cursor_init(&zc, os, obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
if (za.za_integer_length == 8) {
ASSERT(za.za_num_integers == 1);
(void) printf("\t%s = %llu\n",
za.za_name, (u_longlong_t)za.za_first_integer);
} else {
ASSERT(za.za_integer_length == 1);
char val[1024];
VERIFY(zap_lookup(os, obj, za.za_name,
1, sizeof (val), val) == 0);
(void) printf("\t%s = %s\n", za.za_name, val);
}
}
zap_cursor_fini(&zc);
}
static void
dump_mos(spa_t *spa)
{
nvlist_t *nv = spa->spa_label_features;
nvpair_t *pair;
(void) printf("label config:\n");
for (pair = nvlist_next_nvpair(nv, NULL);
pair != NULL;
pair = nvlist_next_nvpair(nv, pair)) {
(void) printf("\t%s\n", nvpair_name(pair));
}
}
static void
zhack_do_feature_stat(int argc, char **argv)
{
spa_t *spa;
objset_t *os;
char *target;
argc--;
argv++;
if (argc < 1) {
(void) fprintf(stderr, "error: missing pool name\n");
usage();
}
target = argv[0];
zhack_spa_open(target, B_TRUE, FTAG, &spa);
os = spa->spa_meta_objset;
dump_obj(os, spa->spa_feat_for_read_obj, "for_read");
dump_obj(os, spa->spa_feat_for_write_obj, "for_write");
dump_obj(os, spa->spa_feat_desc_obj, "descriptions");
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
dump_obj(os, spa->spa_feat_enabled_txg_obj, "enabled_txg");
}
dump_mos(spa);
spa_close(spa, FTAG);
}
static void
zhack_feature_enable_sync(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
zfeature_info_t *feature = arg;
feature_enable_sync(spa, feature, tx);
spa_history_log_internal(spa, "zhack enable feature", tx,
"name=%s flags=%u",
feature->fi_guid, feature->fi_flags);
}
static void
zhack_do_feature_enable(int argc, char **argv)
{
int c;
char *desc, *target;
spa_t *spa;
objset_t *mos;
zfeature_info_t feature;
spa_feature_t nodeps[] = { SPA_FEATURE_NONE };
/*
* Features are not added to the pool's label until their refcounts
* are incremented, so fi_mos can just be left as false for now.
*/
desc = NULL;
feature.fi_uname = "zhack";
feature.fi_flags = 0;
feature.fi_depends = nodeps;
feature.fi_feature = SPA_FEATURE_NONE;
optind = 1;
while ((c = getopt(argc, argv, "+rd:")) != -1) {
switch (c) {
case 'r':
feature.fi_flags |= ZFEATURE_FLAG_READONLY_COMPAT;
break;
case 'd':
desc = strdup(optarg);
break;
default:
usage();
break;
}
}
if (desc == NULL)
desc = strdup("zhack injected");
feature.fi_desc = desc;
argc -= optind;
argv += optind;
if (argc < 2) {
(void) fprintf(stderr, "error: missing feature or pool name\n");
usage();
}
target = argv[0];
feature.fi_guid = argv[1];
if (!zfeature_is_valid_guid(feature.fi_guid))
fatal(NULL, FTAG, "invalid feature guid: %s", feature.fi_guid);
zhack_spa_open(target, B_FALSE, FTAG, &spa);
mos = spa->spa_meta_objset;
if (zfeature_is_supported(feature.fi_guid))
- fatal(spa, FTAG, "'%s' is a real feature, will not enable");
+ fatal(spa, FTAG, "'%s' is a real feature, will not enable",
+ feature.fi_guid);
if (0 == zap_contains(mos, spa->spa_feat_desc_obj, feature.fi_guid))
fatal(spa, FTAG, "feature already enabled: %s",
feature.fi_guid);
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
zhack_feature_enable_sync, &feature, 5, ZFS_SPACE_CHECK_NORMAL));
spa_close(spa, FTAG);
free(desc);
}
static void
feature_incr_sync(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
zfeature_info_t *feature = arg;
uint64_t refcount;
VERIFY0(feature_get_refcount_from_disk(spa, feature, &refcount));
feature_sync(spa, feature, refcount + 1, tx);
spa_history_log_internal(spa, "zhack feature incr", tx,
"name=%s", feature->fi_guid);
}
static void
feature_decr_sync(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
zfeature_info_t *feature = arg;
uint64_t refcount;
VERIFY0(feature_get_refcount_from_disk(spa, feature, &refcount));
feature_sync(spa, feature, refcount - 1, tx);
spa_history_log_internal(spa, "zhack feature decr", tx,
"name=%s", feature->fi_guid);
}
static void
zhack_do_feature_ref(int argc, char **argv)
{
int c;
char *target;
boolean_t decr = B_FALSE;
spa_t *spa;
objset_t *mos;
zfeature_info_t feature;
spa_feature_t nodeps[] = { SPA_FEATURE_NONE };
/*
* fi_desc does not matter here because it was written to disk
* when the feature was enabled, but we need to properly set the
* feature for read or write based on the information we read off
* disk later.
*/
feature.fi_uname = "zhack";
feature.fi_flags = 0;
feature.fi_desc = NULL;
feature.fi_depends = nodeps;
feature.fi_feature = SPA_FEATURE_NONE;
optind = 1;
while ((c = getopt(argc, argv, "+md")) != -1) {
switch (c) {
case 'm':
feature.fi_flags |= ZFEATURE_FLAG_MOS;
break;
case 'd':
decr = B_TRUE;
break;
default:
usage();
break;
}
}
argc -= optind;
argv += optind;
if (argc < 2) {
(void) fprintf(stderr, "error: missing feature or pool name\n");
usage();
}
target = argv[0];
feature.fi_guid = argv[1];
if (!zfeature_is_valid_guid(feature.fi_guid))
fatal(NULL, FTAG, "invalid feature guid: %s", feature.fi_guid);
zhack_spa_open(target, B_FALSE, FTAG, &spa);
mos = spa->spa_meta_objset;
if (zfeature_is_supported(feature.fi_guid)) {
fatal(spa, FTAG,
- "'%s' is a real feature, will not change refcount");
+ "'%s' is a real feature, will not change refcount",
+ feature.fi_guid);
}
if (0 == zap_contains(mos, spa->spa_feat_for_read_obj,
feature.fi_guid)) {
feature.fi_flags &= ~ZFEATURE_FLAG_READONLY_COMPAT;
} else if (0 == zap_contains(mos, spa->spa_feat_for_write_obj,
feature.fi_guid)) {
feature.fi_flags |= ZFEATURE_FLAG_READONLY_COMPAT;
} else {
fatal(spa, FTAG, "feature is not enabled: %s", feature.fi_guid);
}
if (decr) {
uint64_t count;
if (feature_get_refcount_from_disk(spa, &feature,
&count) == 0 && count == 0) {
fatal(spa, FTAG, "feature refcount already 0: %s",
feature.fi_guid);
}
}
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
decr ? feature_decr_sync : feature_incr_sync, &feature,
5, ZFS_SPACE_CHECK_NORMAL));
spa_close(spa, FTAG);
}
static int
zhack_do_feature(int argc, char **argv)
{
char *subcommand;
argc--;
argv++;
if (argc == 0) {
(void) fprintf(stderr,
"error: no feature operation specified\n");
usage();
}
subcommand = argv[0];
if (strcmp(subcommand, "stat") == 0) {
zhack_do_feature_stat(argc, argv);
} else if (strcmp(subcommand, "enable") == 0) {
zhack_do_feature_enable(argc, argv);
} else if (strcmp(subcommand, "ref") == 0) {
zhack_do_feature_ref(argc, argv);
} else {
(void) fprintf(stderr, "error: unknown subcommand: %s\n",
subcommand);
usage();
}
return (0);
}
#define MAX_NUM_PATHS 1024
int
main(int argc, char **argv)
{
extern void zfs_prop_init(void);
char *path[MAX_NUM_PATHS];
const char *subcommand;
int rv = 0;
int c;
g_importargs.path = path;
dprintf_setup(&argc, argv);
zfs_prop_init();
while ((c = getopt(argc, argv, "+c:d:")) != -1) {
switch (c) {
case 'c':
g_importargs.cachefile = optarg;
break;
case 'd':
assert(g_importargs.paths < MAX_NUM_PATHS);
g_importargs.path[g_importargs.paths++] = optarg;
break;
default:
usage();
break;
}
}
argc -= optind;
argv += optind;
optind = 1;
if (argc == 0) {
(void) fprintf(stderr, "error: no command specified\n");
usage();
}
subcommand = argv[0];
if (strcmp(subcommand, "feature") == 0) {
rv = zhack_do_feature(argc, argv);
} else {
(void) fprintf(stderr, "error: unknown subcommand: %s\n",
subcommand);
usage();
}
if (!g_readonly && spa_export(g_pool, NULL, B_TRUE, B_FALSE) != 0) {
fatal(NULL, FTAG, "pool export failed; "
"changes may not be committed to disk\n");
}
kernel_fini();
return (rv);
}
diff --git a/sys/contrib/openzfs/cmd/zpool/os/freebsd/zpool_vdev_os.c b/sys/contrib/openzfs/cmd/zpool/os/freebsd/zpool_vdev_os.c
index aa66d29fa604..66bfe28f13f8 100644
--- a/sys/contrib/openzfs/cmd/zpool/os/freebsd/zpool_vdev_os.c
+++ b/sys/contrib/openzfs/cmd/zpool/os/freebsd/zpool_vdev_os.c
@@ -1,118 +1,124 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
* Copyright (c) 2016, 2017 Intel Corporation.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
*/
/*
* Functions to convert between a list of vdevs and an nvlist representing the
* configuration. Each entry in the list can be one of:
*
* Device vdevs
* disk=(path=..., devid=...)
* file=(path=...)
*
* Group vdevs
* raidz[1|2]=(...)
* mirror=(...)
*
* Hot spares
*
* While the underlying implementation supports it, group vdevs cannot contain
* other group vdevs. All userland verification of devices is contained within
* this file. If successful, the nvlist returned can be passed directly to the
* kernel; we've done as much verification as possible in userland.
*
* Hot spares are a special case, and passed down as an array of disk vdevs, at
* the same level as the root of the vdev tree.
*
* The only function exported by this file is 'make_root_vdev'. The
* function performs several passes:
*
* 1. Construct the vdev specification. Performs syntax validation and
* makes sure each device is valid.
* 2. Check for devices in use. Using libdiskmgt, make sure that no
* devices are already in use. Some conflicts can be overridden using
* the 'force' flag, others cannot.
* 3. Check for replication errors if the 'force' flag is not specified.
* This validates that the replication level is consistent across the
* entire pool.
* 4. Call libzfs to label any whole disks with an EFI label.
*/
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <limits.h>
#include <sys/spa.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <paths.h>
#include <sys/stat.h>
#include <sys/disk.h>
#include <sys/mntent.h>
#include <libgeom.h>
#include "zpool_util.h"
#include <sys/zfs_context.h>
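/*
* Normalize the name to an absolute /dev/ path before handing it to
* check_file() for the in-use checks.
*/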
int
check_device(const char *name, boolean_t force, boolean_t isspare,
boolean_t iswholedisk)
{
char path[MAXPATHLEN];
if (strncmp(name, _PATH_DEV, sizeof (_PATH_DEV) - 1) != 0)
snprintf(path, sizeof (path), "%s%s", _PATH_DEV, name);
else
strlcpy(path, name, sizeof (path));
return (check_file(path, force, isspare));
}
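/*
* FreeBSD keeps no quirk database of drives that misreport their
* physical sector size, so never report a match.
*/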
boolean_t
check_sector_size_database(char *path, int *sector_size)
{
return (0);
}
void
after_zpool_upgrade(zpool_handle_t *zhp)
{
char bootfs[ZPOOL_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
sizeof (bootfs), NULL, B_FALSE) == 0 &&
strcmp(bootfs, "-") != 0) {
(void) printf(gettext("Pool '%s' has the bootfs "
"property set, you might need to update\nthe boot "
"code. See gptzfsboot(8) and loader.efi(8) for "
"details.\n"), zpool_get_name(zhp));
}
}
+
+int
+check_file(const char *file, boolean_t force, boolean_t isspare)
+{
+ return (check_file_generic(file, force, isspare));
+}
diff --git a/sys/contrib/openzfs/cmd/zpool/os/linux/zpool_vdev_os.c b/sys/contrib/openzfs/cmd/zpool/os/linux/zpool_vdev_os.c
index da87aa79f365..10929fa65a11 100644
--- a/sys/contrib/openzfs/cmd/zpool/os/linux/zpool_vdev_os.c
+++ b/sys/contrib/openzfs/cmd/zpool/os/linux/zpool_vdev_os.c
@@ -1,412 +1,418 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
* Copyright (c) 2016, 2017 Intel Corporation.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
*/
/*
* Functions to convert between a list of vdevs and an nvlist representing the
* configuration. Each entry in the list can be one of:
*
* Device vdevs
* disk=(path=..., devid=...)
* file=(path=...)
*
* Group vdevs
* raidz[1|2]=(...)
* mirror=(...)
*
* Hot spares
*
* While the underlying implementation supports it, group vdevs cannot contain
* other group vdevs. All userland verification of devices is contained within
* this file. If successful, the nvlist returned can be passed directly to the
* kernel; we've done as much verification as possible in userland.
*
* Hot spares are a special case, and passed down as an array of disk vdevs, at
* the same level as the root of the vdev tree.
*
* The only function exported by this file is 'make_root_vdev'. The
* function performs several passes:
*
* 1. Construct the vdev specification. Performs syntax validation and
* makes sure each device is valid.
* 2. Check for devices in use. libblkid is used to make sure that no
* devices are already in use. Some conflicts can be overridden using
* the 'force' flag, others cannot.
* 3. Check for replication errors if the 'force' flag is not specified.
* This validates that the replication level is consistent across the
* entire pool.
* 4. Call libzfs to label any whole disks with an EFI label.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <limits.h>
#include <sys/spa.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "zpool_util.h"
#include <sys/zfs_context.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <sys/efi_partition.h>
#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/mntent.h>
#include <uuid/uuid.h>
#include <blkid/blkid.h>
typedef struct vdev_disk_db_entry
{
char id[24];
int sector_size;
} vdev_disk_db_entry_t;
/*
* Database of block devices that lie about physical sector sizes. The
* identification string must be precisely 24 characters to avoid false
* negatives.
*/
static vdev_disk_db_entry_t vdev_disk_database[] = {
{"ATA ADATA SSD S396 3", 8192},
{"ATA APPLE SSD SM128E", 8192},
{"ATA APPLE SSD SM256E", 8192},
{"ATA APPLE SSD SM512E", 8192},
{"ATA APPLE SSD SM768E", 8192},
{"ATA C400-MTFDDAC064M", 8192},
{"ATA C400-MTFDDAC128M", 8192},
{"ATA C400-MTFDDAC256M", 8192},
{"ATA C400-MTFDDAC512M", 8192},
{"ATA Corsair Force 3 ", 8192},
{"ATA Corsair Force GS", 8192},
{"ATA INTEL SSDSA2CT04", 8192},
{"ATA INTEL SSDSA2BZ10", 8192},
{"ATA INTEL SSDSA2BZ20", 8192},
{"ATA INTEL SSDSA2BZ30", 8192},
{"ATA INTEL SSDSA2CW04", 8192},
{"ATA INTEL SSDSA2CW08", 8192},
{"ATA INTEL SSDSA2CW12", 8192},
{"ATA INTEL SSDSA2CW16", 8192},
{"ATA INTEL SSDSA2CW30", 8192},
{"ATA INTEL SSDSA2CW60", 8192},
{"ATA INTEL SSDSC2CT06", 8192},
{"ATA INTEL SSDSC2CT12", 8192},
{"ATA INTEL SSDSC2CT18", 8192},
{"ATA INTEL SSDSC2CT24", 8192},
{"ATA INTEL SSDSC2CW06", 8192},
{"ATA INTEL SSDSC2CW12", 8192},
{"ATA INTEL SSDSC2CW18", 8192},
{"ATA INTEL SSDSC2CW24", 8192},
{"ATA INTEL SSDSC2CW48", 8192},
{"ATA KINGSTON SH100S3", 8192},
{"ATA KINGSTON SH103S3", 8192},
{"ATA M4-CT064M4SSD2 ", 8192},
{"ATA M4-CT128M4SSD2 ", 8192},
{"ATA M4-CT256M4SSD2 ", 8192},
{"ATA M4-CT512M4SSD2 ", 8192},
{"ATA OCZ-AGILITY2 ", 8192},
{"ATA OCZ-AGILITY3 ", 8192},
{"ATA OCZ-VERTEX2 3.5 ", 8192},
{"ATA OCZ-VERTEX3 ", 8192},
{"ATA OCZ-VERTEX3 LT ", 8192},
{"ATA OCZ-VERTEX3 MI ", 8192},
{"ATA OCZ-VERTEX4 ", 8192},
{"ATA SAMSUNG MZ7WD120", 8192},
{"ATA SAMSUNG MZ7WD240", 8192},
{"ATA SAMSUNG MZ7WD480", 8192},
{"ATA SAMSUNG MZ7WD960", 8192},
{"ATA SAMSUNG SSD 830 ", 8192},
{"ATA Samsung SSD 840 ", 8192},
{"ATA SanDisk SSD U100", 8192},
{"ATA TOSHIBA THNSNH06", 8192},
{"ATA TOSHIBA THNSNH12", 8192},
{"ATA TOSHIBA THNSNH25", 8192},
{"ATA TOSHIBA THNSNH51", 8192},
{"ATA APPLE SSD TS064C", 4096},
{"ATA APPLE SSD TS128C", 4096},
{"ATA APPLE SSD TS256C", 4096},
{"ATA APPLE SSD TS512C", 4096},
{"ATA INTEL SSDSA2M040", 4096},
{"ATA INTEL SSDSA2M080", 4096},
{"ATA INTEL SSDSA2M160", 4096},
{"ATA INTEL SSDSC2MH12", 4096},
{"ATA INTEL SSDSC2MH25", 4096},
{"ATA OCZ CORE_SSD ", 4096},
{"ATA OCZ-VERTEX ", 4096},
{"ATA SAMSUNG MCCOE32G", 4096},
{"ATA SAMSUNG MCCOE64G", 4096},
{"ATA SAMSUNG SSD PM80", 4096},
/* Flash drives optimized for 4KB IOs on larger pages */
{"ATA INTEL SSDSC2BA10", 4096},
{"ATA INTEL SSDSC2BA20", 4096},
{"ATA INTEL SSDSC2BA40", 4096},
{"ATA INTEL SSDSC2BA80", 4096},
{"ATA INTEL SSDSC2BB08", 4096},
{"ATA INTEL SSDSC2BB12", 4096},
{"ATA INTEL SSDSC2BB16", 4096},
{"ATA INTEL SSDSC2BB24", 4096},
{"ATA INTEL SSDSC2BB30", 4096},
{"ATA INTEL SSDSC2BB40", 4096},
{"ATA INTEL SSDSC2BB48", 4096},
{"ATA INTEL SSDSC2BB60", 4096},
{"ATA INTEL SSDSC2BB80", 4096},
{"ATA INTEL SSDSC2BW24", 4096},
{"ATA INTEL SSDSC2BW48", 4096},
{"ATA INTEL SSDSC2BP24", 4096},
{"ATA INTEL SSDSC2BP48", 4096},
{"NA SmrtStorSDLKAE9W", 4096},
{"NVMe Amazon EC2 NVMe ", 4096},
/* Imported from Open Solaris */
{"ATA MARVELL SD88SA02", 4096},
/* Advanced format Hard drives */
{"ATA Hitachi HDS5C303", 4096},
{"ATA SAMSUNG HD204UI ", 4096},
{"ATA ST2000DL004 HD20", 4096},
{"ATA WDC WD10EARS-00M", 4096},
{"ATA WDC WD10EARS-00S", 4096},
{"ATA WDC WD10EARS-00Z", 4096},
{"ATA WDC WD15EARS-00M", 4096},
{"ATA WDC WD15EARS-00S", 4096},
{"ATA WDC WD15EARS-00Z", 4096},
{"ATA WDC WD20EARS-00M", 4096},
{"ATA WDC WD20EARS-00S", 4096},
{"ATA WDC WD20EARS-00Z", 4096},
{"ATA WDC WD1600BEVT-0", 4096},
{"ATA WDC WD2500BEVT-0", 4096},
{"ATA WDC WD3200BEVT-0", 4096},
{"ATA WDC WD5000BEVT-0", 4096},
};
#define INQ_REPLY_LEN 96
#define INQ_CMD_LEN 6
static const int vdev_disk_database_size =
sizeof (vdev_disk_database) / sizeof (vdev_disk_database[0]);
boolean_t
check_sector_size_database(char *path, int *sector_size)
{
unsigned char inq_buff[INQ_REPLY_LEN];
unsigned char sense_buffer[32];
unsigned char inq_cmd_blk[INQ_CMD_LEN] =
{INQUIRY, 0, 0, 0, INQ_REPLY_LEN, 0};
sg_io_hdr_t io_hdr;
int error;
int fd;
int i;
/* Prepare INQUIRY command */
memset(&io_hdr, 0, sizeof (sg_io_hdr_t));
io_hdr.interface_id = 'S';
io_hdr.cmd_len = sizeof (inq_cmd_blk);
io_hdr.mx_sb_len = sizeof (sense_buffer);
io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
io_hdr.dxfer_len = INQ_REPLY_LEN;
io_hdr.dxferp = inq_buff;
io_hdr.cmdp = inq_cmd_blk;
io_hdr.sbp = sense_buffer;
io_hdr.timeout = 10; /* 10 milliseconds is ample time */
if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
return (B_FALSE);
error = ioctl(fd, SG_IO, (unsigned long) &io_hdr);
(void) close(fd);
if (error < 0)
return (B_FALSE);
if ((io_hdr.info & SG_INFO_OK_MASK) != SG_INFO_OK)
return (B_FALSE);
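/*
* Standard INQUIRY data carries the 8-byte vendor and 16-byte product
* identification at offset 8; compare those 24 bytes against the table.
*/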
for (i = 0; i < vdev_disk_database_size; i++) {
if (memcmp(inq_buff + 8, vdev_disk_database[i].id, 24))
continue;
*sector_size = vdev_disk_database[i].sector_size;
return (B_TRUE);
}
return (B_FALSE);
}
static int
check_slice(const char *path, blkid_cache cache, int force, boolean_t isspare)
{
int err;
char *value;
/* No valid type detected; the device is safe to use */
value = blkid_get_tag_value(cache, "TYPE", path);
if (value == NULL)
return (0);
/*
* If libblkid detects a ZFS device, we check the device
* using check_file() to see if it's safe. The one safe
* case is a spare device shared between multiple pools.
*/
if (strcmp(value, "zfs_member") == 0) {
err = check_file(path, force, isspare);
} else {
if (force) {
err = 0;
} else {
err = -1;
vdev_error(gettext("%s contains a filesystem of "
"type '%s'\n"), path, value);
}
}
free(value);
return (err);
}
/*
* Validate that a disk and all of its partitions are safe to use.
*
* For EFI labeled disks this can be done relatively easily with the libefi
* library. The partition numbers are extracted from the label and used
* to generate the expected /dev/ paths. Each partition can then be
* checked for conflicts.
*
* For non-EFI labeled disks (MBR/EBR/etc) the same process is possible
* but due to the lack of readily available libraries this scanning is
* not implemented. Instead only the device path as given is checked.
*/
static int
check_disk(const char *path, blkid_cache cache, int force,
boolean_t isspare, boolean_t iswholedisk)
{
struct dk_gpt *vtoc;
char slice_path[MAXPATHLEN];
int err = 0;
int fd, i;
int flags = O_RDONLY|O_DIRECT;
if (!iswholedisk)
return (check_slice(path, cache, force, isspare));
/* only spares can be shared, other devices require exclusive access */
if (!isspare)
flags |= O_EXCL;
if ((fd = open(path, flags)) < 0) {
char *value = blkid_get_tag_value(cache, "TYPE", path);
(void) fprintf(stderr, gettext("%s is in use and contains "
"a %s filesystem.\n"), path, value ? value : "unknown");
free(value);
return (-1);
}
/*
* Expected to fail for non-EFI labeled disks. Just check the device
* as given and do not attempt to detect and scan partitions.
*/
err = efi_alloc_and_read(fd, &vtoc);
if (err) {
(void) close(fd);
return (check_slice(path, cache, force, isspare));
}
/*
* The primary EFI partition label is damaged but the secondary
* label at the end of the device is intact. Rather than trust the
* backup label, play it safe and treat this as a non-EFI device.
*/
if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
efi_free(vtoc);
(void) close(fd);
if (force) {
/* Partitions will now be created using the backup */
return (0);
} else {
vdev_error(gettext("%s contains a corrupt primary "
"EFI label.\n"), path);
return (-1);
}
}
for (i = 0; i < vtoc->efi_nparts; i++) {
if (vtoc->efi_parts[i].p_tag == V_UNASSIGNED ||
uuid_is_null((uchar_t *)&vtoc->efi_parts[i].p_guid))
continue;
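/*
* Build the expected partition path: names under UDISK_ROOT get a
* "-part<n>" suffix, other paths get "p<n>" when the device name ends
* in a digit and a bare partition number otherwise.
*/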
if (strncmp(path, UDISK_ROOT, strlen(UDISK_ROOT)) == 0)
(void) snprintf(slice_path, sizeof (slice_path),
"%s%s%d", path, "-part", i+1);
else
(void) snprintf(slice_path, sizeof (slice_path),
"%s%s%d", path, isdigit(path[strlen(path)-1]) ?
"p" : "", i+1);
err = check_slice(slice_path, cache, force, isspare);
if (err)
break;
}
efi_free(vtoc);
(void) close(fd);
return (err);
}
int
check_device(const char *path, boolean_t force,
boolean_t isspare, boolean_t iswholedisk)
{
blkid_cache cache;
int error;
error = blkid_get_cache(&cache, NULL);
if (error != 0) {
(void) fprintf(stderr, gettext("unable to access the blkid "
"cache.\n"));
return (-1);
}
error = check_disk(path, cache, force, isspare, iswholedisk);
blkid_put_cache(cache);
return (error);
}
void
after_zpool_upgrade(zpool_handle_t *zhp)
{
}
+
+int
+check_file(const char *file, boolean_t force, boolean_t isspare)
+{
+ return (check_file_generic(file, force, isspare));
+}
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index 35a59710c05e..a053bd65dbdb 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -1,10726 +1,10727 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012 by Frederik Wessels. All rights reserved.
* Copyright (c) 2012 by Cyril Plisko. All rights reserved.
* Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright [2021] Hewlett Packard Enterprise Development LP
*/
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>
#include <libzfs.h>
#include <libzutil.h>
#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "statcommon.h"
libzfs_handle_t *g_zfs;
static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);
static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);
static int zpool_do_checkpoint(int, char **);
static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);
static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);
static int zpool_do_reguid(int, char **);
static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);
static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);
static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);
static int zpool_do_upgrade(int, char **);
static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);
static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);
static int zpool_do_sync(int, char **);
static int zpool_do_version(int, char **);
static int zpool_do_wait(int, char **);
static zpool_compat_status_t zpool_do_load_compat(
const char *, boolean_t *);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_ADD,
HELP_ATTACH,
HELP_CLEAR,
HELP_CREATE,
HELP_CHECKPOINT,
HELP_DESTROY,
HELP_DETACH,
HELP_EXPORT,
HELP_HISTORY,
HELP_IMPORT,
HELP_IOSTAT,
HELP_LABELCLEAR,
HELP_LIST,
HELP_OFFLINE,
HELP_ONLINE,
HELP_REPLACE,
HELP_REMOVE,
HELP_INITIALIZE,
HELP_SCRUB,
HELP_RESILVER,
HELP_TRIM,
HELP_STATUS,
HELP_UPGRADE,
HELP_EVENTS,
HELP_GET,
HELP_SET,
HELP_SPLIT,
HELP_SYNC,
HELP_REGUID,
HELP_REOPEN,
HELP_VERSION,
HELP_WAIT
} zpool_help_t;
/*
* Flags for stats to display with "zpool iostat".
*/
enum iostat_type {
IOS_DEFAULT = 0,
IOS_LATENCY = 1,
IOS_QUEUES = 2,
IOS_L_HISTO = 3,
IOS_RQ_HISTO = 4,
IOS_COUNT, /* always last element */
};
/* iostat_type entries as bitmasks */
#define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
#define IOS_LATENCY_M (1ULL << IOS_LATENCY)
#define IOS_QUEUES_M (1ULL << IOS_QUEUES)
#define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
#define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
/* Mask of all the histo bits */
#define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
/*
* Lookup table for iostat flags to nvlist names. Basically a list
* of all the nvlists a flag requires. Also specifies the order in
* which data gets printed in zpool iostat.
*/
static const char *vsx_type_to_nvlist[IOS_COUNT][13] = {
[IOS_L_HISTO] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
NULL},
[IOS_LATENCY] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
NULL},
[IOS_QUEUES] = {
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
NULL},
[IOS_RQ_HISTO] = {
ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
NULL},
};
/*
* Given a cb->cb_flags with a histogram bit set, return the iostat_type.
* Right now, only one histo bit is ever set at one time, so we can
* just do a highbit64(a)
*/
#define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
typedef struct zpool_command {
const char *name;
int (*func)(int, char **);
zpool_help_t usage;
} zpool_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zpool_command_t command_table[] = {
{ "version", zpool_do_version, HELP_VERSION },
{ NULL },
{ "create", zpool_do_create, HELP_CREATE },
{ "destroy", zpool_do_destroy, HELP_DESTROY },
{ NULL },
{ "add", zpool_do_add, HELP_ADD },
{ "remove", zpool_do_remove, HELP_REMOVE },
{ NULL },
{ "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
{ NULL },
{ "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
{ NULL },
{ "list", zpool_do_list, HELP_LIST },
{ "iostat", zpool_do_iostat, HELP_IOSTAT },
{ "status", zpool_do_status, HELP_STATUS },
{ NULL },
{ "online", zpool_do_online, HELP_ONLINE },
{ "offline", zpool_do_offline, HELP_OFFLINE },
{ "clear", zpool_do_clear, HELP_CLEAR },
{ "reopen", zpool_do_reopen, HELP_REOPEN },
{ NULL },
{ "attach", zpool_do_attach, HELP_ATTACH },
{ "detach", zpool_do_detach, HELP_DETACH },
{ "replace", zpool_do_replace, HELP_REPLACE },
{ "split", zpool_do_split, HELP_SPLIT },
{ NULL },
{ "initialize", zpool_do_initialize, HELP_INITIALIZE },
{ "resilver", zpool_do_resilver, HELP_RESILVER },
{ "scrub", zpool_do_scrub, HELP_SCRUB },
{ "trim", zpool_do_trim, HELP_TRIM },
{ NULL },
{ "import", zpool_do_import, HELP_IMPORT },
{ "export", zpool_do_export, HELP_EXPORT },
{ "upgrade", zpool_do_upgrade, HELP_UPGRADE },
{ "reguid", zpool_do_reguid, HELP_REGUID },
{ NULL },
{ "history", zpool_do_history, HELP_HISTORY },
{ "events", zpool_do_events, HELP_EVENTS },
{ NULL },
{ "get", zpool_do_get, HELP_GET },
{ "set", zpool_do_set, HELP_SET },
{ "sync", zpool_do_sync, HELP_SYNC },
{ NULL },
{ "wait", zpool_do_wait, HELP_WAIT },
};
#define NCOMMAND (ARRAY_SIZE(command_table))
#define VDEV_ALLOC_CLASS_LOGS "logs"
static zpool_command_t *current_command;
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;
static const char *
get_usage(zpool_help_t idx)
{
switch (idx) {
case HELP_ADD:
return (gettext("\tadd [-fgLnP] [-o property=value] "
"<pool> <vdev> ...\n"));
case HELP_ATTACH:
return (gettext("\tattach [-fsw] [-o property=value] "
"<pool> <device> <new-device>\n"));
case HELP_CLEAR:
return (gettext("\tclear [-nF] <pool> [device]\n"));
case HELP_CREATE:
return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
"\t [-O file-system-property=value] ... \n"
"\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
case HELP_CHECKPOINT:
return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-f] <pool>\n"));
case HELP_DETACH:
return (gettext("\tdetach <pool> <device>\n"));
case HELP_EXPORT:
return (gettext("\texport [-af] <pool> ...\n"));
case HELP_HISTORY:
return (gettext("\thistory [-il] [<pool>] ...\n"));
case HELP_IMPORT:
return (gettext("\timport [-d dir] [-D]\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]] -a\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]]\n"
"\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
case HELP_IOSTAT:
return (gettext("\tiostat [[[-c [script1,script2,...]"
"[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
"\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
" [[-n] interval [count]]\n"));
case HELP_LABELCLEAR:
return (gettext("\tlabelclear [-f] <vdev>\n"));
case HELP_LIST:
return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
"[-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_OFFLINE:
return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
case HELP_ONLINE:
return (gettext("\tonline [-e] <pool> <device> ...\n"));
case HELP_REPLACE:
return (gettext("\treplace [-fsw] [-o property=value] "
"<pool> <device> [new-device]\n"));
case HELP_REMOVE:
return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
case HELP_REOPEN:
return (gettext("\treopen [-n] <pool>\n"));
case HELP_INITIALIZE:
return (gettext("\tinitialize [-c | -s] [-w] <pool> "
"[<device> ...]\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-s | -p] [-w] <pool> ...\n"));
case HELP_RESILVER:
return (gettext("\tresilver <pool> ...\n"));
case HELP_TRIM:
return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
"[<device> ...]\n"));
case HELP_STATUS:
return (gettext("\tstatus [-c [script1,script2,...]] "
"[-igLpPstvxD] [-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade\n"
"\tupgrade -v\n"
"\tupgrade [-V version] <-a | pool ...>\n"));
case HELP_EVENTS:
return (gettext("\tevents [-vHf [pool] | -c]\n"));
case HELP_GET:
return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
"<\"all\" | property[,...]> <pool> ...\n"));
case HELP_SET:
return (gettext("\tset <property=value> <pool> \n"));
case HELP_SPLIT:
return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
"\t [-o property=value] <pool> <newpool> "
"[<device> ...]\n"));
case HELP_REGUID:
return (gettext("\treguid <pool>\n"));
case HELP_SYNC:
return (gettext("\tsync [pool] ...\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
case HELP_WAIT:
return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
"<pool> [interval]\n"));
+ default:
+ __builtin_unreachable();
}
-
- abort();
- /* NOTREACHED */
}
static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
uint_t children = 0;
nvlist_t **child;
uint_t i;
(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children);
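/* A vdev with no children is a leaf; skip indirect and hole vdevs. */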
if (children == 0) {
char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
VDEV_NAME_PATH);
if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
strcmp(path, VDEV_TYPE_HOLE) != 0)
fnvlist_add_boolean(res, path);
free(path);
return;
}
for (i = 0; i < children; i++) {
zpool_collect_leaves(zhp, child[i], res);
}
}
/*
* Callback routine that will print out a pool property value.
*/
static int
print_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
if (zpool_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (zpool_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", zpool_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static void
usage(boolean_t requested)
{
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
int i;
(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
((strcmp(current_command->name, "set") == 0) ||
(strcmp(current_command->name, "get") == 0) ||
(strcmp(current_command->name, "list") == 0))) {
(void) fprintf(fp,
gettext("\nthe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
"PROPERTY", "EDIT", "VALUES");
/* Iterate over all properties */
(void) zprop_iter(print_prop_cb, fp, B_FALSE, B_TRUE,
ZFS_TYPE_POOL);
(void) fprintf(fp, "\t%-19s ", "feature@...");
(void) fprintf(fp, "YES disabled | enabled | active\n");
(void) fprintf(fp, gettext("\nThe feature@ properties must be "
"appended with a feature name.\nSee zpool-features(7).\n"));
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
/*
* zpool initialize [-c | -s] [-w] <pool> [<vdev> ...]
* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
* if none specified.
*
* -c Cancel. Ends active initializing.
* -s Suspend. Initializing can then be restarted with no flags.
* -w Wait. Blocks until initializing has completed.
*/
int
zpool_do_initialize(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
nvlist_t *vdevs;
int err = 0;
boolean_t wait = B_FALSE;
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"suspend", no_argument, NULL, 's'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
while ((c = getopt_long(argc, argv, "csw", long_options, NULL)) != -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_CANCEL;
break;
case 's':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_SUSPEND;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_INITIALIZE_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
"-s\n"));
usage(B_FALSE);
}
poolname = argv[0];
zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
} else {
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
if (wait)
err = zpool_initialize_wait(zhp, cmd_type, vdevs);
else
err = zpool_initialize(zhp, cmd_type, vdevs);
fnvlist_free(vdevs);
zpool_close(zhp);
return (err);
}
/*
* print a pool vdev config for dry runs
*/
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
const char *match, int name_flags)
{
nvlist_t **child;
uint_t c, children;
char *vname;
boolean_t printed = B_FALSE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
if (name != NULL)
(void) printf("\t%*s%s\n", indent, "", name);
return;
}
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
char *class = "";
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole == B_TRUE) {
continue;
}
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
class = VDEV_ALLOC_BIAS_LOG;
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
if (strcmp(match, class) != 0)
continue;
if (!printed && name != NULL) {
(void) printf("\t%*s%s\n", indent, "", name);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
print_vdev_tree(zhp, vname, child[c], indent + 2, "",
name_flags);
free(vname);
}
}
/*
* Print the list of l2cache devices for dry runs.
*/
static void
print_cache_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "cache");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
/*
* Print the list of spares for dry runs.
*/
static void
print_spare_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "spares");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
nvp = nvlist_next_nvpair(proplist, nvp)) {
if (zpool_prop_feature(nvpair_name(nvp)))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a property pair (name, string-value) into a property nvlist.
*/
static int
add_prop_list(const char *propname, char *propval, nvlist_t **props,
boolean_t poolprop)
{
zpool_prop_t prop = ZPOOL_PROP_INVAL;
nvlist_t *proplist;
const char *normnm;
char *strval;
if (*props == NULL &&
nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
(void) fprintf(stderr,
gettext("internal error: out of memory\n"));
return (1);
}
proplist = *props;
if (poolprop) {
const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
const char *cname =
zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
!zpool_prop_feature(propname)) {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid pool property\n"), propname);
return (2);
}
/*
* feature@ properties and version should not be specified
* at the same time.
*/
if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
prop_list_contains_feature(proplist))) {
(void) fprintf(stderr, gettext("'feature@' and "
"'version' properties cannot be specified "
"together\n"));
return (2);
}
/*
* if version is specified, only "legacy" compatibility
* may be requested
*/
if ((prop == ZPOOL_PROP_COMPATIBILITY &&
strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
nvlist_exists(proplist, cname) &&
strcmp(fnvlist_lookup_string(proplist, cname),
ZPOOL_COMPAT_LEGACY) != 0)) {
(void) fprintf(stderr, gettext("when 'version' is "
"specified, the 'compatibility' feature may only "
"be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
return (2);
}
if (zpool_prop_feature(propname))
normnm = propname;
else
normnm = zpool_prop_to_name(prop);
} else {
zfs_prop_t fsprop = zfs_name_to_prop(propname);
if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
B_FALSE)) {
normnm = zfs_prop_to_name(fsprop);
} else if (zfs_prop_user(propname) ||
zfs_prop_userquota(propname)) {
normnm = propname;
} else {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid filesystem property\n"), propname);
return (2);
}
}
if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
prop != ZPOOL_PROP_CACHEFILE) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (2);
}
if (nvlist_add_string(proplist, normnm, propval) != 0) {
(void) fprintf(stderr, gettext("internal "
"error: out of memory\n"));
return (1);
}
return (0);
}
/*
* Set a default property pair (name, string-value) in a property nvlist
*/
static int
add_prop_list_default(const char *propname, char *propval, nvlist_t **props,
boolean_t poolprop)
{
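/*
* Keep any value the caller already supplied; otherwise add the
* default. The value is always added as a pool property here.
*/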
char *pval;
if (nvlist_lookup_string(*props, propname, &pval) == 0)
return (0);
return (add_prop_list(propname, propval, props, B_TRUE));
}
/*
* zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
*
* -f Force addition of devices, even if they appear in use
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not add the devices, but display the resulting layout if
* they were to be added.
* -o Set property=value.
* -P Display full path for vdev name.
*
* Adds the given vdevs to 'pool'. As with create, the bulk of this work is
* handled by make_root_vdev(), which constructs the nvlist needed to pass to
* libzfs.
*/
int
zpool_do_add(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
int name_flags = 0;
int c;
nvlist_t *nvroot;
char *poolname;
int ret;
zpool_handle_t *zhp;
nvlist_t *config;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'g':
name_flags |= VDEV_NAME_GUID;
break;
case 'L':
name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
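/* Only the ashift property may be set when adding vdevs. */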
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 'P':
name_flags |= VDEV_NAME_PATH;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
argc--;
argv++;
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
return (1);
}
/* unless manually specified use "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
/* pass off to make_root_vdev for processing */
nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
return (1);
}
if (dryrun) {
nvlist_t *poolnvroot;
nvlist_t **l2child, **sparechild;
uint_t l2children, sparechildren, c;
char *vname;
boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&poolnvroot) == 0);
(void) printf(gettext("would update '%s' to the following "
"configuration:\n\n"), zpool_get_name(zhp));
/* print original main pool and new tree */
print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
name_flags | VDEV_NAME_TYPE_ID);
print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
/* print other classes: 'dedup', 'special', and 'log' */
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", poolnvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
}
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", poolnvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
}
if (num_logs(poolnvroot) > 0) {
print_vdev_tree(zhp, "logs", poolnvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
} else if (num_logs(nvroot) > 0) {
print_vdev_tree(zhp, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
}
/* Do the same for the caches */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
hadcache = B_TRUE;
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
if (!hadcache)
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
/* And finally the spares */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
hadspare = B_TRUE;
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
if (!hadspare)
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
ret = 0;
} else {
ret = (zpool_add(zhp, nvroot) != 0);
}
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool remove [-npsw] <pool> <vdev> ...
*
* Removes the given vdev from the pool.
*/
int
zpool_do_remove(int argc, char **argv)
{
char *poolname;
int i, ret = 0;
zpool_handle_t *zhp = NULL;
boolean_t stop = B_FALSE;
int c;
boolean_t noop = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t wait = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "npsw")) != -1) {
switch (c) {
case 'n':
noop = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 's':
stop = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if (stop && noop) {
(void) fprintf(stderr, gettext("stop request ignored\n"));
return (0);
}
if (stop) {
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (zpool_vdev_remove_cancel(zhp) != 0)
ret = 1;
if (wait) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -w cannot be used with -s\n"));
usage(B_FALSE);
}
} else {
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device\n"));
usage(B_FALSE);
}
for (i = 1; i < argc; i++) {
if (noop) {
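/*
* For -n just report the memory the indirect mappings would
* consume after removal, without removing anything.
*/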
uint64_t size;
if (zpool_vdev_indirect_size(zhp, argv[i],
&size) != 0) {
ret = 1;
break;
}
if (parsable) {
(void) printf("%s %llu\n",
argv[i], (unsigned long long)size);
} else {
char valstr[32];
zfs_nicenum(size, valstr,
sizeof (valstr));
(void) printf("Memory that will be "
"used after removing %s: %s\n",
argv[i], valstr);
}
} else {
if (zpool_vdev_remove(zhp, argv[i]) != 0)
ret = 1;
}
}
if (ret == 0 && wait)
ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
}
zpool_close(zhp);
return (ret);
}
/*
* zpool labelclear [-f] <vdev>
*
* -f Force clearing the label for the vdevs which are members of
* the exported or foreign pools.
*
* Verifies that the vdev is not active and zeros out the label information
* on the device.
*/
int
zpool_do_labelclear(int argc, char **argv)
{
char vdev[MAXPATHLEN];
char *name = NULL;
struct stat st;
int c, fd = -1, ret = 0;
nvlist_t *config;
pool_state_t state;
boolean_t inuse = B_FALSE;
boolean_t force = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get vdev name */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing vdev name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/*
* Check if we were given an absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(vdev, argv[0], sizeof (vdev));
if (vdev[0] != '/' && stat(vdev, &st) != 0) {
int error;
error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat(vdev, &st) != 0)) {
(void) fprintf(stderr, gettext(
"failed to find device %s, try specifying absolute "
"path instead\n"), argv[0]);
return (1);
}
}
if ((fd = open(vdev, O_RDWR)) < 0) {
(void) fprintf(stderr, gettext("failed to open %s: %s\n"),
vdev, strerror(errno));
return (1);
}
/*
* Flush all dirty pages for the block device. This should not be
* fatal when the device does not support BLKFLSBUF as would be the
* case for a file vdev.
*/
if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
(void) fprintf(stderr, gettext("failed to invalidate "
"cache for %s: %s\n"), vdev, strerror(errno));
if (zpool_read_label(fd, &config, NULL) != 0) {
(void) fprintf(stderr,
gettext("failed to read label from %s\n"), vdev);
ret = 1;
goto errout;
}
nvlist_free(config);
ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to check state for %s\n"), vdev);
ret = 1;
goto errout;
}
if (!inuse)
goto wipe_label;
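/*
* The label belongs to a pool. Devices that are active pool members,
* spares, or l2cache devices are always refused; exported or
* potentially active pools can be overridden with -f.
*/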
switch (state) {
default:
case POOL_STATE_ACTIVE:
case POOL_STATE_SPARE:
case POOL_STATE_L2CACHE:
(void) fprintf(stderr, gettext(
"%s is a member (%s) of pool \"%s\"\n"),
vdev, zpool_pool_state_to_name(state), name);
ret = 1;
goto errout;
case POOL_STATE_EXPORTED:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of exported pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_POTENTIALLY_ACTIVE:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of potentially active pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_DESTROYED:
/* inuse should never be set for a destroyed pool */
assert(0);
break;
}
wipe_label:
ret = zpool_clear_label(fd);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to clear label for %s\n"), vdev);
}
errout:
free(name);
(void) close(fd);
return (ret);
}
/*
* zpool create [-fnd] [-o property=value] ...
* [-O file-system-property=value] ...
* [-R root] [-m mountpoint] <pool> <dev> ...
*
* -f Force creation, even if devices appear in use
* -n Do not create the pool, but display the resulting layout if it
* were to be created.
* -R Create a pool under an alternate root
* -m Set default mountpoint for the root dataset. By default it's
* '/<pool>'
* -o Set property=value.
* -o Set feature@feature=enabled|disabled.
* -d Don't automatically enable all supported pool features
* (individual features can be enabled with -o).
* -O Set fsproperty=value in the pool's root file system
*
* Creates the named pool according to the given vdev specification. The
* bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
* Once we get the nvlist back from make_root_vdev(), we either print out the
* contents (if '-n' was specified), or pass it to libzfs to do the creation.
*/
int
zpool_do_create(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t enable_pool_features = B_TRUE;
int c;
nvlist_t *nvroot = NULL;
char *poolname;
char *tname = NULL;
int ret = 1;
char *altroot = NULL;
char *compat = NULL;
char *mountpoint = NULL;
nvlist_t *fsprops = NULL;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'd':
enable_pool_features = B_FALSE;
break;
case 'R':
altroot = optarg;
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto errout;
break;
case 'm':
/* Equivalent to -O mountpoint=optarg */
mountpoint = optarg;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
goto errout;
}
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval, &props, B_TRUE))
goto errout;
/*
* If the user is creating a pool that doesn't support
* feature flags, don't enable any features.
*/
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
char *end;
u_longlong_t ver;
ver = strtoull(propval, &end, 10);
if (*end == '\0' &&
ver < SPA_VERSION_FEATURES) {
enable_pool_features = B_FALSE;
}
}
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
altroot = propval;
if (zpool_name_to_prop(optarg) ==
ZPOOL_PROP_COMPATIBILITY)
compat = propval;
break;
case 'O':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -O option\n"));
goto errout;
}
*propval = '\0';
propval++;
/*
* Mountpoints are checked and then added later.
* Uniquely among properties, they can be specified
* more than once, to avoid conflict with -m.
*/
if (0 == strcmp(optarg,
zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
mountpoint = propval;
} else if (add_prop_list(optarg, propval, &fsprops,
B_FALSE)) {
goto errout;
}
break;
case 't':
/*
* Sanity check temporary pool name.
*/
if (strchr(optarg, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create "
"'%s': invalid character '/' in temporary "
"name\n"), optarg);
(void) fprintf(stderr, gettext("use 'zfs "
"create' to create a dataset\n"));
goto errout;
}
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto errout;
tname = optarg;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
goto badusage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
goto badusage;
}
poolname = argv[0];
/*
* As a special case, check for use of '/' in the name, and direct the
* user to use 'zfs create' instead.
*/
if (strchr(poolname, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create '%s': invalid "
"character '/' in pool name\n"), poolname);
(void) fprintf(stderr, gettext("use 'zfs create' to "
"create a dataset\n"));
goto errout;
}
/* pass off to make_root_vdev for bulk processing */
nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
argc - 1, argv + 1);
if (nvroot == NULL)
goto errout;
/* make_root_vdev() allows 0 toplevel children if there are spares */
if (!zfs_allocatable_devs(nvroot)) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: at least one toplevel vdev must be "
"specified\n"));
goto errout;
}
if (altroot != NULL && altroot[0] != '/') {
(void) fprintf(stderr, gettext("invalid alternate root '%s': "
"must be an absolute path\n"), altroot);
goto errout;
}
/*
* Check the validity of the mountpoint and direct the user to use the
* '-m' mountpoint option if it looks like it's in use.
*/
if (mountpoint == NULL ||
(strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
char buf[MAXPATHLEN];
DIR *dirp;
if (mountpoint && mountpoint[0] != '/') {
(void) fprintf(stderr, gettext("invalid mountpoint "
"'%s': must be an absolute path, 'legacy', or "
"'none'\n"), mountpoint);
goto errout;
}
if (mountpoint == NULL) {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s/%s",
altroot, poolname);
else
(void) snprintf(buf, sizeof (buf), "/%s",
poolname);
} else {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s%s",
altroot, mountpoint);
else
(void) snprintf(buf, sizeof (buf), "%s",
mountpoint);
}
if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
(void) fprintf(stderr, gettext("mountpoint '%s' : "
"%s\n"), buf, strerror(errno));
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a different default\n"));
goto errout;
} else if (dirp) {
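/* More than the '.' and '..' entries means the mountpoint is not empty. */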
int count = 0;
while (count < 3 && readdir(dirp) != NULL)
count++;
(void) closedir(dirp);
if (count > 2) {
(void) fprintf(stderr, gettext("mountpoint "
"'%s' exists and is not empty\n"), buf);
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a "
"different default\n"));
goto errout;
}
}
}
/*
* Now that the mountpoint's validity has been checked, ensure that
* the property is set appropriately prior to creating the pool.
*/
if (mountpoint != NULL) {
ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
mountpoint, &fsprops, B_FALSE);
if (ret != 0)
goto errout;
}
ret = 1;
if (dryrun) {
/*
* For a dry run invocation, print out a basic message and run
* through all the vdevs in the list and print out in an
* appropriate hierarchy.
*/
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), poolname);
print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
print_vdev_tree(NULL, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
print_vdev_tree(NULL, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, 0);
print_cache_list(nvroot, 0);
print_spare_list(nvroot, 0);
ret = 0;
} else {
/*
* Load in feature set.
* Note: if compatibility property not given, we'll have
* NULL, which means 'all features'.
*/
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
goto errout;
/*
* props contains list of features to enable.
* For each feature:
* - remove it if feature@name=disabled
* - leave it there if feature@name=enabled
* - add it if:
* - enable_pool_features (ie: no '-d' or '-o version')
* - it's supported by the kernel module
* - it's in the requested feature set
* - warn if it's enabled but not in compat
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
char propname[MAXPATHLEN];
char *propval;
zfeature_info_t *feat = &spa_feature_table[i];
(void) snprintf(propname, sizeof (propname),
"feature@%s", feat->fi_uname);
if (!nvlist_lookup_string(props, propname, &propval)) {
if (strcmp(propval, ZFS_FEATURE_DISABLED) == 0)
(void) nvlist_remove_all(props,
propname);
if (strcmp(propval,
ZFS_FEATURE_ENABLED) == 0 &&
!requested_features[i])
(void) fprintf(stderr, gettext(
"Warning: feature \"%s\" enabled "
"but is not in specified "
"'compatibility' feature set.\n"),
feat->fi_uname);
} else if (
enable_pool_features &&
feat->fi_zfs_mod_supported &&
requested_features[i]) {
ret = add_prop_list(propname,
ZFS_FEATURE_ENABLED, &props, B_TRUE);
if (ret != 0)
goto errout;
}
}
ret = 1;
if (zpool_create(g_zfs, poolname,
nvroot, props, fsprops) == 0) {
zfs_handle_t *pool = zfs_open(g_zfs,
tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
if (pool != NULL) {
if (zfs_mount(pool, NULL, 0) == 0) {
ret = zfs_shareall(pool);
zfs_commit_all_shares();
}
zfs_close(pool);
}
} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
(void) fprintf(stderr, gettext("pool name may have "
"been omitted\n"));
}
}
errout:
nvlist_free(nvroot);
nvlist_free(fsprops);
nvlist_free(props);
return (ret);
badusage:
nvlist_free(fsprops);
nvlist_free(props);
usage(B_FALSE);
return (2);
}
/*
* zpool destroy <pool>
*
* -f Forcefully unmount any datasets
*
* Destroy the given pool. Automatically unmounts any datasets in the pool.
*/
int
zpool_do_destroy(int argc, char **argv)
{
boolean_t force = B_FALSE;
int c;
char *pool;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
/*
* As a special case, check for use of '/' in the name, and
* direct the user to use 'zfs destroy' instead.
*/
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("use 'zfs destroy' to "
"destroy a dataset\n"));
return (1);
}
if (zpool_disable_datasets(zhp, force) != 0) {
(void) fprintf(stderr, gettext("could not destroy '%s': "
"could not unmount datasets\n"), zpool_get_name(zhp));
zpool_close(zhp);
return (1);
}
/* The history must be logged as part of the destroy */
log_history = B_FALSE;
ret = (zpool_destroy(zhp, history_str) != 0);
zpool_close(zhp);
return (ret);
}
typedef struct export_cbdata {
boolean_t force;
boolean_t hardforce;
} export_cbdata_t;
/*
* Export one pool
*/
static int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
export_cbdata_t *cb = data;
if (zpool_disable_datasets(zhp, cb->force) != 0)
return (1);
/* The history must be logged as part of the export */
log_history = B_FALSE;
if (cb->hardforce) {
if (zpool_export_force(zhp, history_str) != 0)
return (1);
} else if (zpool_export(zhp, cb->force, history_str) != 0) {
return (1);
}
return (0);
}
/*
* zpool export [-f] <pool> ...
*
* -a Export all pools
* -f Forcefully unmount datasets
*
* Export the given pools. By default, the command will attempt to cleanly
* unmount any active datasets within the pool. If the '-f' flag is specified,
* then the datasets will be forcefully unmounted.
*/
int
zpool_do_export(int argc, char **argv)
{
export_cbdata_t cb;
boolean_t do_all = B_FALSE;
boolean_t force = B_FALSE;
boolean_t hardforce = B_FALSE;
int c, ret;
/* check options */
while ((c = getopt(argc, argv, "afF")) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'f':
force = B_TRUE;
break;
case 'F':
hardforce = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.force = force;
cb.hardforce = hardforce;
argc -= optind;
argv += optind;
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL,
B_FALSE, zpool_export_one, &cb));
}
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
ret = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, zpool_export_one,
&cb);
return (ret);
}
/*
* Given a vdev configuration, determine the maximum width needed for the device
* name column.
*/
static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
int name_flags)
{
char *name;
nvlist_t **child;
uint_t c, children;
int ret;
name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
if (strlen(name) + depth > max)
max = strlen(name) + depth;
free(name);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2,
max, name_flags)) > max)
max = ret;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2,
max, name_flags)) > max)
max = ret;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2,
max, name_flags)) > max)
max = ret;
}
return (max);
}
typedef struct spare_cbdata {
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
} spare_cbdata_t;
static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
uint64_t guid;
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
search == guid)
return (B_TRUE);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (find_vdev(child[c], search))
return (B_TRUE);
}
return (B_FALSE);
}
static int
find_spare(zpool_handle_t *zhp, void *data)
{
spare_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (find_vdev(nvroot, cbp->cb_guid)) {
cbp->cb_zhp = zhp;
return (1);
}
zpool_close(zhp);
return (0);
}
typedef struct status_cbdata {
int cb_count;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_allpools;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_explain;
boolean_t cb_first;
boolean_t cb_dedup_stats;
boolean_t cb_print_status;
boolean_t cb_print_slow_ios;
boolean_t cb_print_vdev_init;
boolean_t cb_print_vdev_trim;
vdev_cmd_data_list_t *vcdl;
} status_cbdata_t;
/* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
static int
is_blank_str(char *str)
{
while (str != NULL && *str != '\0') {
if (!isblank(*str))
return (0);
str++;
}
return (1);
}
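/*
 * Illustrative examples (not from the original source), following the
 * isblank() semantics above:
 *
 *	is_blank_str(NULL)	returns 1
 *	is_blank_str("  \t")	returns 1
 *	is_blank_str(" x ")	returns 0
 *
 * This is what lets zpool_print_cmd() below substitute "-" for values
 * that would otherwise print as nothing.
 */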
/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path)
{
vdev_cmd_data_t *data;
int i, j;
char *val;
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) != 0) ||
(strcmp(vcdl->data[i].pool, pool) != 0)) {
/* Not the vdev we're looking for */
continue;
}
data = &vcdl->data[i];
/* Print out all the output values for this vdev */
for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
val = NULL;
/* Does this vdev have values for this column? */
for (int k = 0; k < data->cols_cnt; k++) {
if (strcmp(data->cols[k],
vcdl->uniq_cols[j]) == 0) {
/* yes it does, record the value */
val = data->lines[k];
break;
}
}
/*
* Mark empty values with dashes to make output
* awk-able.
*/
if (val == NULL || is_blank_str(val))
val = "-";
printf("%*s", vcdl->uniq_cols_width[j], val);
if (j < vcdl->uniq_cols_cnt - 1)
printf(" ");
}
/* Print out any values that aren't in a column at the end */
for (j = data->cols_cnt; j < data->lines_cnt; j++) {
/* Did we have any columns? If so print a spacer. */
if (vcdl->uniq_cols_cnt > 0)
printf(" ");
val = data->lines[j];
printf("%s", val ? val : "");
}
break;
}
}
/*
* Print vdev initialization status for leaves
*/
static void
print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
struct tm zaction_ts;
time_t t = vs->vs_initialize_action_time;
int initialize_pct = 100;
if (vs->vs_initialize_state !=
VDEV_INITIALIZE_COMPLETE) {
initialize_pct = (vs->vs_initialize_bytes_done *
100 / (vs->vs_initialize_bytes_est + 1));
}
(void) localtime_r(&t, &zaction_ts);
(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
switch (vs->vs_initialize_state) {
case VDEV_INITIALIZE_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_INITIALIZE_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_INITIALIZE_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% initialized%s)"),
initialize_pct, zbuf);
} else {
(void) printf(gettext(" (uninitialized)"));
}
} else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) printf(gettext(" (initializing)"));
}
}
/*
* Print vdev TRIM status for leaves
*/
static void
print_status_trim(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
struct tm zaction_ts;
time_t t = vs->vs_trim_action_time;
int trim_pct = 100;
if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
trim_pct = (vs->vs_trim_bytes_done *
100 / (vs->vs_trim_bytes_est + 1));
}
(void) localtime_r(&t, &zaction_ts);
(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
switch (vs->vs_trim_state) {
case VDEV_TRIM_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_TRIM_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_TRIM_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% trimmed%s)"),
trim_pct, zbuf);
} else if (vs->vs_trim_notsup) {
(void) printf(gettext(" (trim unsupported)"));
} else {
(void) printf(gettext(" (untrimmed)"));
}
} else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
(void) printf(gettext(" (trimming)"));
}
}
/*
* Return the color associated with a health string. This includes returning
* NULL for no color change.
*/
static char *
health_str_to_color(const char *health)
{
if (strcmp(health, gettext("FAULTED")) == 0 ||
strcmp(health, gettext("SUSPENDED")) == 0 ||
strcmp(health, gettext("UNAVAIL")) == 0) {
return (ANSI_RED);
}
if (strcmp(health, gettext("OFFLINE")) == 0 ||
strcmp(health, gettext("DEGRADED")) == 0 ||
strcmp(health, gettext("REMOVED")) == 0) {
return (ANSI_YELLOW);
}
return (NULL);
}
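/*
 * Illustrative usage (mirrors print_status_config() below); a NULL
 * return simply means "print with no color change":
 *
 *	printf_color(health_str_to_color(state), "%s", state);
 */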
/*
* Print out configuration state as requested by status_callback.
*/
static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
{
nvlist_t **child, *root;
uint_t c, i, vsc, children;
pool_scan_stat_t *ps = NULL;
vdev_stat_t *vs;
char rbuf[6], wbuf[6], cbuf[6];
char *vname;
uint64_t notpresent;
spare_cbdata_t spare_cb;
const char *state;
char *type;
char *path = NULL;
char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
return;
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
/*
* For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
* online drives.
*/
if (vs->vs_aux == VDEV_AUX_SPARED)
state = gettext("INUSE");
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = gettext("AVAIL");
}
printf_color(health_str_to_color(state),
"\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
name, state);
if (!isspare) {
if (vs->vs_read_errors)
rcolor = ANSI_RED;
if (vs->vs_write_errors)
wcolor = ANSI_RED;
if (vs->vs_checksum_errors)
ccolor = ANSI_RED;
if (cb->cb_literal) {
printf(" ");
printf_color(rcolor, "%5llu",
(u_longlong_t)vs->vs_read_errors);
printf(" ");
printf_color(wcolor, "%5llu",
(u_longlong_t)vs->vs_write_errors);
printf(" ");
printf_color(ccolor, "%5llu",
(u_longlong_t)vs->vs_checksum_errors);
} else {
zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
zfs_nicenum(vs->vs_checksum_errors, cbuf,
sizeof (cbuf));
printf(" ");
printf_color(rcolor, "%5s", rbuf);
printf(" ");
printf_color(wcolor, "%5s", wbuf);
printf(" ");
printf_color(ccolor, "%5s", cbuf);
}
if (cb->cb_print_slow_ios) {
if (children == 0) {
/* Only leaf vdevs have slow IOs */
zfs_nicenum(vs->vs_slow_ios, rbuf,
sizeof (rbuf));
} else {
snprintf(rbuf, sizeof (rbuf), "-");
}
if (cb->cb_literal)
printf(" %5llu", (u_longlong_t)vs->vs_slow_ios);
else
printf(" %5s", rbuf);
}
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&notpresent) == 0) {
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
(void) printf(" %s %s", gettext("was"), path);
} else if (vs->vs_aux != 0) {
(void) printf(" ");
color_start(ANSI_RED);
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ASHIFT_TOO_BIG:
(void) printf(gettext("unsupported minimum blocksize"));
break;
case VDEV_AUX_SPARED:
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
&spare_cb.cb_guid) == 0);
if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
if (strcmp(zpool_get_name(spare_cb.cb_zhp),
zpool_get_name(zhp)) == 0)
(void) printf(gettext("currently in "
"use"));
else
(void) printf(gettext("in use by "
"pool '%s'"),
zpool_get_name(spare_cb.cb_zhp));
zpool_close(spare_cb.cb_zhp);
} else {
(void) printf(gettext("currently in use"));
}
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_IO_FAILURE:
(void) printf(gettext("experienced I/O failures"));
break;
case VDEV_AUX_BAD_LOG:
(void) printf(gettext("bad intent log"));
break;
case VDEV_AUX_EXTERNAL:
(void) printf(gettext("external device fault"));
break;
case VDEV_AUX_SPLIT_POOL:
(void) printf(gettext("split into new pool"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
color_end();
} else if (children == 0 && !isspare &&
getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
vs->vs_configured_ashift < vs->vs_physical_ashift) {
(void) printf(
gettext(" block size: %dB configured, %dB native"),
1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
}
/* The root vdev has the scrub/resilver stats */
root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0) {
if (vs->vs_scan_processed != 0) {
(void) printf(gettext(" (%s)"),
(ps->pss_func == POOL_SCAN_RESILVER) ?
"resilvering" : "repairing");
} else if (vs->vs_resilver_deferred) {
(void) printf(gettext(" (awaiting resilver)"));
}
}
/* The top-level vdevs have the rebuild stats */
if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
children == 0) {
if (vs->vs_rebuild_processed != 0) {
(void) printf(gettext(" (resilvering)"));
}
}
if (cb->vcdl != NULL) {
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
/* Display vdev initialization and trim status for leaves. */
if (children == 0) {
print_status_initialize(vs, cb->cb_print_vdev_init);
print_status_trim(vs, cb->cb_print_vdev_trim);
}
(void) printf("\n");
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE, ishole = B_FALSE;
/* Don't print logs or holes here */
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
if (islog || ishole)
continue;
/* Only print normal classes here */
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
/* Provide vdev_rebuild_stats to children if available */
if (vrs == NULL) {
(void) nvlist_lookup_uint64_array(nv,
ZPOOL_CONFIG_REBUILD_STATS,
(uint64_t **)&vrs, &i);
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_status_config(zhp, cb, vname, child[c], depth + 2,
isspare, vrs);
free(vname);
}
}
/*
* Print the configuration of an exported pool. Iterate over all vdevs in the
* pool, printing out the name and status for each one.
*/
static void
print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
int depth)
{
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
char *type, *vname;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
strcmp(type, VDEV_TYPE_HOLE) == 0)
return;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
(void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
if (vs->vs_aux != 0) {
(void) printf(" ");
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
}
(void) printf("\n");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_import_config(cb, vname, child[c], depth + 2);
free(vname);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
(void) printf(gettext("\tcache\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
(void) printf(gettext("\tspares\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
}
/*
* Print specialized class vdevs.
*
* These are recorded as top level vdevs in the main pool child array
* but with "is_log" set to 1 or an "alloc_bias" string. We use either
* print_status_config() or print_import_config() to print the top level
* class vdevs, then any of their children (e.g. mirrored slogs) are printed
* recursively - which works because only the top level vdev is marked.
*/
static void
print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
const char *class)
{
uint_t c, children;
nvlist_t **child;
boolean_t printed = B_FALSE;
assert(zhp != NULL || !cb->cb_verbose);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
char *bias = NULL;
char *type = NULL;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class) != 0)
continue;
if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
(void) printf("\t%s\t\n", gettext(class));
printed = B_TRUE;
}
char *name = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cb->cb_print_status)
print_status_config(zhp, cb, name, child[c], 2,
B_FALSE, NULL);
else
print_import_config(cb, name, child[c], 2);
free(name);
}
}
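/*
 * Sketch (illustrative, values assumed) of the nvlist shape this
 * function keys off of. A top-level special-class mirror child looks
 * roughly like:
 *
 *	nvlist_t *nv = fnvlist_alloc();
 *	fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, VDEV_TYPE_MIRROR);
 *	fnvlist_add_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
 *	    VDEV_ALLOC_BIAS_SPECIAL);
 *
 * whereas a slog child instead carries ZPOOL_CONFIG_IS_LOG = 1.
 */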
/*
* Display the status for the given pool.
*/
static int
show_import(nvlist_t *config, boolean_t report_error)
{
uint64_t pool_state;
vdev_stat_t *vs;
char *name;
uint64_t guid;
uint64_t hostid = 0;
char *msgid;
char *hostname = "unknown";
nvlist_t *nvroot, *nvinfo;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t vsc;
char *comment;
status_cbdata_t cb = { 0 };
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
reason = zpool_import_status(config, &msgid, &errata);
/*
* If we're importing using a cachefile, then we won't report any
* errors unless we are in the scan phase of the import.
*/
if (reason != ZPOOL_STATUS_OK && !report_error)
return (reason);
(void) printf(gettext(" pool: %s\n"), name);
(void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
(void) printf(gettext(" state: %s"), health);
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext(" (DESTROYED)"));
(void) printf("\n");
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"missing from the system.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices contains"
" corrupted data.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
(void) printf(
gettext(" status: The pool data is corrupted.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices "
"are offlined.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted.\n"));
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk version.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"an incompatible version.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Some supported "
"features are not enabled on the pool.\n\t"
"(Note that they may be intentionally disabled "
"if the\n\t'compatibility' property is set.)\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
"the file(s) indicated by the 'compatibility'\n"
"property.\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n"
"requested by the 'compatibility' property.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool uses the following "
"feature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_print_unsup_feat(config);
color_end();
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_print_unsup_feat(config);
color_end();
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is currently "
"imported by another system.\n"));
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool has the "
"multihost property on. It cannot\n\tbe safely imported "
"when the system hostid is not set.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
"by another system.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
"be read.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices were "
"being resilvered.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
break;
default:
/*
* No other status can be seen when importing pools.
*/
assert(reason == ZPOOL_STATUS_OK);
}
/*
* Print out an action according to the overall state of the pool.
*/
if (vs->vs_state == VDEV_STATE_HEALTHY) {
if (reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric identifier, "
"though\n\tsome features will not be available "
"without an explicit 'zpool upgrade'.\n"));
} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric\n\tidentifier, "
"though the file(s) indicated by its "
"'compatibility'\n\tproperty cannot be parsed at "
"this time.\n"));
} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier and\n\tthe '-f' flag.\n"));
} else if (reason == ZPOOL_STATUS_ERRATA) {
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
(void) printf(gettext(" action: The pool can "
"be imported using its name or numeric "
"identifier,\n\thowever there is a compat"
"ibility issue which should be corrected"
"\n\tby running 'zpool scrub'\n"));
break;
case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
(void) printf(gettext(" action: The pool can"
"not be imported with this version of ZFS "
"due to\n\tan active asynchronous destroy. "
"Revert to an earlier version\n\tand "
"allow the destroy to complete before "
"updating.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted datasets contain an on-disk "
"incompatibility, which\n\tneeds to be "
"corrected. Backup these datasets to new "
"encrypted datasets\n\tand destroy the "
"old ones.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted snapshots and bookmarks contain "
"an on-disk\n\tincompatibility. This may "
"cause on-disk corruption if they are used"
"\n\twith 'zfs recv'. To correct the "
"issue, enable the bookmark_v2 feature.\n\t"
"No additional action is needed if there "
"are no encrypted snapshots or\n\t"
"bookmarks. If preserving the encrypted "
"snapshots and bookmarks is\n\trequired, "
"use a non-raw send to backup and restore "
"them. Alternately,\n\tthey may be removed"
" to resolve the incompatibility.\n"));
break;
default:
/*
* All errata must contain an action message.
*/
assert(0);
}
} else {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier.\n"));
}
} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
(void) printf(gettext(" action: The pool can be imported "
"despite missing or damaged devices. The\n\tfault "
"tolerance of the pool may be compromised if imported.\n"));
} else {
switch (reason) {
case ZPOOL_STATUS_VERSION_NEWER:
(void) printf(gettext(" action: The pool cannot be "
"imported. Access the pool on a system running "
"newer\n\tsoftware, or recreate the pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be "
"imported. Access the pool on a system that "
"supports\n\tthe required feature(s), or recreate "
"the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be "
"imported in read-write mode. Import the pool "
"with\n"
"\t\"-o readonly=on\", access the pool on a system "
"that supports the\n\trequired feature(s), or "
"recreate the pool from backup.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
(void) printf(gettext(" action: The pool cannot be "
"imported. Attach the missing\n\tdevices and try "
"again.\n"));
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
VERIFY0(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) printf(gettext(" action: The pool must be "
"exported from %s (hostid=%lx)\n\tbefore it "
"can be safely imported.\n"), hostname,
(unsigned long) hostid);
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
(void) printf(gettext(" action: Set a unique system "
"hostid with the zgenhostid(8) command.\n"));
break;
default:
(void) printf(gettext(" action: The pool cannot be "
"imported due to damaged devices or data.\n"));
}
}
/* Print the comment attached to the pool. */
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
(void) printf(gettext("comment: %s\n"), comment);
/*
* If the state is "closed" or "can't open", and the aux state
* is "corrupt data":
*/
if (((vs->vs_state == VDEV_STATE_CLOSED) ||
(vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
(vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext("\tThe pool was destroyed, "
"but can be imported using the '-Df' flags.\n"));
else if (pool_state != POOL_STATE_EXPORTED)
(void) printf(gettext("\tThe pool may be active on "
"another system, but can be imported using\n\t"
"the '-f' flag.\n"));
}
if (msgid != NULL) {
(void) printf(gettext(
" see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
(void) printf(gettext(" config:\n\n"));
cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
VDEV_NAME_TYPE_ID);
if (cb.cb_namewidth < 10)
cb.cb_namewidth = 10;
print_import_config(&cb, name, nvroot, 0);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
(void) printf(gettext("\n\tAdditional devices are known to "
"be part of this pool, though their\n\texact "
"configuration cannot be determined.\n"));
}
return (0);
}
static boolean_t
zfs_force_import_required(nvlist_t *config)
{
uint64_t state;
uint64_t hostid = 0;
nvlist_t *nvinfo;
state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
return (B_TRUE);
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state != MMP_STATE_INACTIVE)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Perform the import for the given configuration. This passes the heavy
* lifting off to zpool_import_props(), and then mounts the datasets contained
* within the pool.
*/
static int
do_import(nvlist_t *config, const char *newname, const char *mntopts,
nvlist_t *props, int flags)
{
int ret = 0;
zpool_handle_t *zhp;
char *name;
uint64_t version;
name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (!SPA_VERSION_IS_SUPPORTED(version)) {
(void) fprintf(stderr, gettext("cannot import '%s': pool "
"is formatted using an unsupported ZFS version\n"), name);
return (1);
} else if (zfs_force_import_required(config) &&
!(flags & ZFS_IMPORT_ANY_HOST)) {
mmp_state_t mmp_state = MMP_STATE_INACTIVE;
nvlist_t *nvinfo;
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state == MMP_STATE_ACTIVE) {
char *hostname = "<unknown>";
uint64_t hostid = 0;
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool is imported on %s (hostid: "
"0x%lx)\nExport the pool on the other system, "
"then run 'zpool import'.\n"),
name, hostname, (unsigned long) hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) fprintf(stderr, gettext("Cannot import '%s': "
"pool has the multihost property on and the\n"
"system's hostid is not set. Set a unique hostid "
"with the zgenhostid(8) command.\n"), name);
} else {
char *hostname = "<unknown>";
uint64_t timestamp = 0;
uint64_t hostid = 0;
if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
hostname = fnvlist_lookup_string(config,
ZPOOL_CONFIG_HOSTNAME);
if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
timestamp = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_TIMESTAMP);
if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool was previously in use from another system.\n"
"Last accessed by %s (hostid=%lx) at %s"
"The pool can be imported, use 'zpool import -f' "
"to import the pool.\n"), name, hostname,
(unsigned long)hostid, ctime((time_t *)&timestamp));
}
return (1);
}
if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
return (1);
if (newname != NULL)
name = (char *)newname;
if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
return (1);
/*
* Loading keys is best effort. We don't want to return immediately
* if it fails but we do want to give the error to the caller.
*/
if (flags & ZFS_IMPORT_LOAD_KEYS) {
ret = zfs_crypto_attempt_load_keys(g_zfs, name);
if (ret != 0)
ret = 1;
}
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
!(flags & ZFS_IMPORT_ONLY) &&
zpool_enable_datasets(zhp, mntopts, 0) != 0) {
zpool_close(zhp);
return (1);
}
zpool_close(zhp);
return (ret);
}
static int
import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
char *orig_name, char *new_name,
boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
importargs_t *import)
{
nvlist_t *config = NULL;
nvlist_t *found_config = NULL;
uint64_t pool_state;
/*
* At this point we have a list of import candidate configs. Even if
* we were searching by pool name or guid, we still need to
* post-process the list to deal with pool state and possible
* duplicate names.
*/
int err = 0;
nvpair_t *elem = NULL;
boolean_t first = B_TRUE;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
verify(nvpair_value_nvlist(elem, &config) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
continue;
if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
continue;
verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
import->policy) == 0);
if (!pool_specified) {
if (first)
first = B_FALSE;
else if (!do_all)
(void) printf("\n");
if (do_all) {
err |= do_import(config, NULL, mntopts,
props, flags);
} else {
/*
* If we're importing from cachefile, then
* we don't want to report errors until we
* are in the scan phase of the import. If
* we get an error, then we return that error
* to invoke the scan phase.
*/
if (import->cachefile && !import->scan)
err = show_import(config, B_FALSE);
else
(void) show_import(config, B_TRUE);
}
} else if (import->poolname != NULL) {
char *name;
/*
* We are searching for a pool based on name.
*/
verify(nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &name) == 0);
if (strcmp(name, import->poolname) == 0) {
if (found_config != NULL) {
(void) fprintf(stderr, gettext(
"cannot import '%s': more than "
"one matching pool\n"),
import->poolname);
(void) fprintf(stderr, gettext(
"import by numeric ID instead\n"));
err = B_TRUE;
}
found_config = config;
}
} else {
uint64_t guid;
/*
* Search for a pool by guid.
*/
verify(nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
if (guid == import->guid)
found_config = config;
}
}
/*
* If we were searching for a specific pool, verify that we found a
* pool, and then do the import.
*/
if (pool_specified && err == 0) {
if (found_config == NULL) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), orig_name);
err = B_TRUE;
} else {
err |= do_import(found_config, new_name,
mntopts, props, flags);
}
}
/*
* If we were just looking for pools, report an error if none were
* found.
*/
if (!pool_specified && first)
(void) fprintf(stderr,
gettext("no pools available to import\n"));
return (err);
}
typedef struct target_exists_args {
const char *poolname;
uint64_t poolguid;
} target_exists_args_t;
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
target_exists_args_t *args = data;
nvlist_t *config = zpool_get_config(zhp, NULL);
int found = 0;
if (config == NULL)
return (0);
if (args->poolname != NULL) {
char *pool_name;
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&pool_name) == 0);
if (strcmp(pool_name, args->poolname) == 0)
found = 1;
} else {
uint64_t pool_guid;
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) == 0);
if (pool_guid == args->poolguid)
found = 1;
}
zpool_close(zhp);
return (found);
}
/*
* zpool checkpoint <pool>
* checkpoint --discard <pool>
*
* -d Discard the checkpoint from a checkpointed
* --discard pool.
*
* -w Wait for discarding a checkpoint to complete.
* --wait
*
* Checkpoints the specified pool, by taking a "snapshot" of its
* current state. A pool can only have one checkpoint at a time.
*/
int
zpool_do_checkpoint(int argc, char **argv)
{
boolean_t discard, wait;
char *pool;
zpool_handle_t *zhp;
int c, err;
struct option long_options[] = {
{"discard", no_argument, NULL, 'd'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
discard = B_FALSE;
wait = B_FALSE;
while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
switch (c) {
case 'd':
discard = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (wait && !discard) {
(void) fprintf(stderr, gettext("--wait only valid when "
"--discard also specified\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
/* As a special case, check for use of '/' in the name */
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("'zpool checkpoint' "
"doesn't work on datasets. To save the state "
"of a dataset from a specific point in time "
"please use 'zfs snapshot'\n"));
return (1);
}
if (discard) {
err = (zpool_discard_checkpoint(zhp) != 0);
if (err == 0 && wait)
err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
} else {
err = (zpool_checkpoint(zhp) != 0);
}
zpool_close(zhp);
return (err);
}
#define CHECKPOINT_OPT 1024
/*
* zpool import [-d dir] [-D]
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] -a
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
* [newpool]
*
* -c Read pool information from a cachefile instead of searching
* devices. If importing from a cachefile config fails, then
* fall back to searching for devices only in the directories that
* exist in the cachefile.
*
* -d Scan in a specific directory, other than /dev/. More than
* one directory can be specified using multiple '-d' options.
*
* -D Scan for previously destroyed pools, or import all or only the
* specified destroyed pools.
*
* -R Temporarily import the pool, with all mountpoints relative to
* the given root. The pool will remain exported when the machine
* is rebooted.
*
* -V Import even in the presence of faulted vdevs. This is an
* intentionally undocumented option for testing purposes, and
* treats the pool configuration as complete, leaving any bad
* vdevs in the FAULTED state. In other words, it does a verbatim
* import.
*
* -f Force import, even if it appears that the pool is active.
*
* -F Attempt rewind if necessary.
*
* -n See if rewind would work, but don't actually rewind.
*
* -N Import the pool but don't mount datasets.
*
* -T Specify a starting txg to use for import. This is an
* intentionally undocumented option for testing purposes.
*
* -a Import all pools found.
*
* -l Load encryption keys while importing.
*
* -o Set property=value and/or temporary mount options (without '=').
*
* -s Scan using the default search path; the libblkid cache will
* not be consulted.
*
* --rewind-to-checkpoint
* Import the pool and revert back to the checkpoint.
*
* The import command scans for pools to import, and imports pools based on pool
* name and GUID. The pool can also be renamed as part of the import process.
*/
int
zpool_do_import(int argc, char **argv)
{
char **searchdirs = NULL;
char *env, *envdup = NULL;
int nsearch = 0;
int c;
int err = 0;
nvlist_t *pools = NULL;
boolean_t do_all = B_FALSE;
boolean_t do_destroyed = B_FALSE;
char *mntopts = NULL;
uint64_t searchguid = 0;
char *searchname = NULL;
char *propval;
nvlist_t *policy = NULL;
nvlist_t *props = NULL;
int flags = ZFS_IMPORT_NORMAL;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
boolean_t do_scan = B_FALSE;
boolean_t pool_exists = B_FALSE;
boolean_t pool_specified = B_FALSE;
uint64_t txg = -1ULL;
char *cachefile = NULL;
importargs_t idata = { 0 };
char *endptr;
struct option long_options[] = {
{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
long_options, NULL)) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'c':
cachefile = optarg;
break;
case 'd':
searchdirs = safe_realloc(searchdirs,
(nsearch + 1) * sizeof (char *));
searchdirs[nsearch++] = optarg;
break;
case 'D':
do_destroyed = B_TRUE;
break;
case 'f':
flags |= ZFS_IMPORT_ANY_HOST;
break;
case 'F':
do_rewind = B_TRUE;
break;
case 'l':
flags |= ZFS_IMPORT_LOAD_KEYS;
break;
case 'm':
flags |= ZFS_IMPORT_MISSING_LOG;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'N':
flags |= ZFS_IMPORT_ONLY;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE))
goto error;
} else {
mntopts = optarg;
}
break;
case 'R':
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto error;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto error;
break;
case 's':
do_scan = B_TRUE;
break;
case 't':
flags |= ZFS_IMPORT_TEMP_NAME;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto error;
break;
case 'T':
errno = 0;
txg = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid txg value\n"));
usage(B_FALSE);
}
rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
break;
case 'V':
flags |= ZFS_IMPORT_VERBATIM;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case CHECKPOINT_OPT:
flags |= ZFS_IMPORT_CHECKPOINT;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (cachefile && nsearch != 0) {
(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
usage(B_FALSE);
}
if (cachefile && do_scan) {
(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
(void) fprintf(stderr, gettext("-l is only meaningful during "
"an import\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In the future, we can capture further policy and include it here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0)
goto error;
/* check argument count */
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
/*
* Check for the effective uid. We do this explicitly here because
* otherwise any attempt to discover pools will silently fail.
*/
if (argc == 0 && geteuid() != 0) {
(void) fprintf(stderr, gettext("cannot "
"discover pools: permission denied\n"));
if (searchdirs != NULL)
free(searchdirs);
nvlist_free(props);
nvlist_free(policy);
return (1);
}
/*
* Depending on the arguments given, we do one of the following:
*
* <none> Iterate through all pools and display information about
* each one.
*
* -a Iterate through all pools and try to import each one.
*
* <id> Find the pool that corresponds to the given GUID/pool
* name and import that one.
*
* -D The above options apply only to destroyed pools.
*/
if (argc != 0) {
char *endptr;
errno = 0;
searchguid = strtoull(argv[0], &endptr, 10);
if (errno != 0 || *endptr != '\0') {
searchname = argv[0];
searchguid = 0;
}
pool_specified = B_TRUE;
/*
* User specified a name or guid. Ensure it's unique.
*/
target_exists_args_t search = {searchname, searchguid};
pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
}
/*
* Check the environment for the preferred search path.
*/
if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
char *dir, *tmp = NULL;
envdup = strdup(env);
for (dir = strtok_r(envdup, ":", &tmp);
dir != NULL;
dir = strtok_r(NULL, ":", &tmp)) {
searchdirs = safe_realloc(searchdirs,
(nsearch + 1) * sizeof (char *));
searchdirs[nsearch++] = dir;
}
}
idata.path = searchdirs;
idata.paths = nsearch;
idata.poolname = searchname;
idata.guid = searchguid;
idata.cachefile = cachefile;
idata.scan = do_scan;
idata.policy = policy;
pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops);
if (pools != NULL && pool_exists &&
(argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name already exists\n"),
argv[0]);
(void) fprintf(stderr, gettext("use the form '%s "
"<pool | id> <newpool>' to give it a new name\n"),
"zpool import");
err = 1;
} else if (pools == NULL && pool_exists) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name is already created/imported,\n"),
argv[0]);
(void) fprintf(stderr, gettext("and no additional pools "
"with that name were found\n"));
err = 1;
} else if (pools == NULL) {
if (argc != 0) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), argv[0]);
}
err = 1;
}
if (err == 1) {
free(searchdirs);
free(envdup);
nvlist_free(policy);
nvlist_free(pools);
nvlist_free(props);
return (1);
}
- err = import_pools(pools, props, mntopts, flags, argv[0],
- argc == 1 ? NULL : argv[1], do_destroyed, pool_specified,
- do_all, &idata);
+ err = import_pools(pools, props, mntopts, flags,
+ argc >= 1 ? argv[0] : NULL,
+ argc >= 2 ? argv[1] : NULL,
+ do_destroyed, pool_specified, do_all, &idata);
/*
* If we're using the cachefile and we failed to import, then
* fall back to scanning the directory for pools that match
* those in the cachefile.
*/
if (err != 0 && cachefile != NULL) {
(void) printf(gettext("cachefile import failed, retrying\n"));
/*
* We use the scan flag to gather the directories that exist
* in the cachefile. If we need to fall back to searching for
* the pool config, we will only search devices in these
* directories.
*/
idata.scan = B_TRUE;
nvlist_free(pools);
pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops);
- err = import_pools(pools, props, mntopts, flags, argv[0],
- argc == 1 ? NULL : argv[1], do_destroyed, pool_specified,
- do_all, &idata);
+ err = import_pools(pools, props, mntopts, flags,
+ argc >= 1 ? argv[0] : NULL,
+ argc >= 2 ? argv[1] : NULL,
+ do_destroyed, pool_specified, do_all, &idata);
}
error:
nvlist_free(props);
nvlist_free(pools);
nvlist_free(policy);
free(searchdirs);
free(envdup);
return (err ? 1 : 0);
}
/*
* zpool sync [-f] [pool] ...
*
* -f (undocumented) force uberblock (and config including zpool cache file)
* update.
*
* Sync the specified pool(s).
* Without arguments "zpool sync" will sync all pools.
* This command initiates TXG sync(s) and will return after the TXG(s) commit.
*
*/
static int
zpool_do_sync(int argc, char **argv)
{
int ret;
boolean_t force = B_FALSE;
/* check options */
while ((ret = getopt(argc, argv, "f")) != -1) {
switch (ret) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_sync_one on all pools */
ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, zpool_sync_one,
&force);
return (ret);
}
typedef struct iostat_cbdata {
uint64_t cb_flags;
int cb_name_flags;
int cb_namewidth;
int cb_iteration;
char **cb_vdev_names; /* Only show these vdevs */
unsigned int cb_vdev_names_count;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_scripted;
zpool_list_t *cb_list;
vdev_cmd_data_list_t *vcdl;
} iostat_cbdata_t;
/* iostat labels */
typedef struct name_and_columns {
const char *name; /* Column name */
unsigned int columns; /* Center name to this number of columns */
} name_and_columns_t;
#define IOSTAT_MAX_LABELS 13 /* Max number of labels on one line */
static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
{NULL}},
[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {NULL}},
[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
{"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
{"trimq_write", 2}, {NULL}},
[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {NULL}},
[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
{"async_read", 2}, {"async_write", 2}, {"scrub", 2},
{"trim", 2}, {NULL}},
};
/* Shorthand - if "columns" field not set, default to 1 column */
static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
{"write"}, {NULL}},
[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {NULL}},
[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
{"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
{"pend"}, {"activ"}, {NULL}},
[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {NULL}},
[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, {NULL}},
};
static const char *histo_to_title[] = {
[IOS_L_HISTO] = "latency",
[IOS_RQ_HISTO] = "req_size",
};
/*
* Return the number of labels in a null-terminated name_and_columns_t
* array.
*
*/
static unsigned int
label_array_len(const name_and_columns_t *labels)
{
int i = 0;
while (labels[i].name)
i++;
return (i);
}
/*
* Return the number of strings in a null-terminated string array.
* For example:
*
* const char foo[] = {"bar", "baz", NULL}
*
* returns 2
*/
static uint64_t
str_array_len(const char *array[])
{
uint64_t i = 0;
while (array[i])
i++;
return (i);
}
/*
* Return a default column width for default/latency/queue columns. This does
* not include histograms, which have their columns autosized.
*/
static unsigned int
default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
{
unsigned long column_width = 5; /* Normal niceprint */
static unsigned long widths[] = {
/*
* Choose some sane default column sizes for printing the
* raw numbers.
*/
[IOS_DEFAULT] = 15, /* 1PB capacity */
[IOS_LATENCY] = 10, /* 1B ns = 10sec */
[IOS_QUEUES] = 6, /* 1M queue entries */
[IOS_L_HISTO] = 10, /* 1B ns = 10sec */
[IOS_RQ_HISTO] = 6, /* 1M queue entries */
};
if (cb->cb_literal)
column_width = widths[type];
return (column_width);
}
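/*
 * Illustrative example (per the widths[] table above): plain
 * "zpool iostat" prints default/latency columns 5 characters wide,
 * while literal mode (cb_literal, i.e. -p) widens them, e.g.
 *
 *	default_column_width(cb, IOS_DEFAULT) == 15
 *	default_column_width(cb, IOS_QUEUES) == 6
 */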
/*
* Print the column labels, i.e:
*
* capacity operations bandwidth
* alloc free read write read write ...
*
* If force_column_width is set, use it for the column width. If not set, use
* the default column width.
*/
static void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
int i, idx, s;
int text_start, rw_column_width, spaces_to_end;
uint64_t flags = cb->cb_flags;
uint64_t f;
unsigned int column_width = force_column_width;
/* For each bit set in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
if (!force_column_width)
column_width = default_column_width(cb, idx);
/* Print our top labels centered over "read write" label. */
for (i = 0; i < label_array_len(labels[idx]); i++) {
const char *name = labels[idx][i].name;
/*
* We treat labels[][].columns == 0 as shorthand
* for one column. It makes writing out the label
* tables more concise.
*/
unsigned int columns = MAX(1, labels[idx][i].columns);
unsigned int slen = strlen(name);
rw_column_width = (column_width * columns) +
(2 * (columns - 1));
text_start = (int)((rw_column_width) / columns -
slen / columns);
if (text_start < 0)
text_start = 0;
printf(" "); /* Two spaces between columns */
/* Space from beginning of column to label */
for (s = 0; s < text_start; s++)
printf(" ");
printf("%s", name);
/* Print space after label to end of column */
spaces_to_end = rw_column_width - text_start - slen;
if (spaces_to_end < 0)
spaces_to_end = 0;
for (s = 0; s < spaces_to_end; s++)
printf(" ");
}
}
}
/*
* print_cmd_columns - Print custom column titles from -c
*
* If the user specified "zpool status|iostat -c", then print their custom
* column titles in the header. For example, print_cmd_columns() would print
* the " col1 col2" part of this:
*
* $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
* ...
* capacity operations bandwidth
* pool alloc free read write read write col1 col2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
* mypool 269K 1008M 0 0 107 946
* mirror 269K 1008M 0 0 107 946
* sdb - - 0 0 102 473 val1 val2
* sdc - - 0 0 5 473 val1 val2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
*/
static void
print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
{
int i, j;
vdev_cmd_data_t *data = &vcdl->data[0];
if (vcdl->count == 0 || data == NULL)
return;
/*
* Each vdev cmd should have the same column names unless the user did
* something weird with their cmd. Just take the column names from the
* first vdev and assume it works for all of them.
*/
for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
printf(" ");
if (use_dashes) {
for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
printf("-");
} else {
printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
vcdl->uniq_cols[i]);
}
}
}
/*
* Utility function to print out a line of dashes like:
*
* -------------------------------- ----- ----- ----- ----- -----
*
* ...or a dashed named-row line like:
*
* logs - - - - -
*
* @cb: iostat data
*
* @force_column_width If non-zero, use the value as the column width.
* Otherwise use the default column widths.
*
* @name: Print a dashed named-row line starting
* with @name. Otherwise, print a regular
* dashed line.
*/
static void
print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *name)
{
int i;
unsigned int namewidth;
uint64_t flags = cb->cb_flags;
uint64_t f;
int idx;
const name_and_columns_t *labels;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdev_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
name ? strlen(name) : 0);
if (name) {
printf("%-*s", namewidth, name);
} else {
for (i = 0; i < namewidth; i++)
(void) printf("-");
}
/* For each bit in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
unsigned int column_width;
idx = lowbit64(f) - 1;
if (force_column_width)
column_width = force_column_width;
else
column_width = default_column_width(cb, idx);
labels = iostat_bottom_labels[idx];
for (i = 0; i < label_array_len(labels); i++) {
if (name)
printf(" %*s-", column_width - 1, " ");
else
printf(" %.*s", column_width,
"--------------------");
}
}
}
static void
print_iostat_separator_impl(iostat_cbdata_t *cb,
unsigned int force_column_width)
{
print_iostat_dashes(cb, force_column_width, NULL);
}
static void
print_iostat_separator(iostat_cbdata_t *cb)
{
print_iostat_separator_impl(cb, 0);
}
static void
print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *histo_vdev_name)
{
unsigned int namewidth;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdev_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
histo_vdev_name ? strlen(histo_vdev_name) : 0);
if (histo_vdev_name)
printf("%-*s", namewidth, histo_vdev_name);
else
printf("%*s", namewidth, "");
print_iostat_labels(cb, force_column_width, iostat_top_labels);
printf("\n");
printf("%-*s", namewidth, title);
print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 0);
printf("\n");
print_iostat_separator_impl(cb, force_column_width);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 1);
printf("\n");
}
static void
print_iostat_header(iostat_cbdata_t *cb)
{
print_iostat_header_impl(cb, 0, NULL);
}
/*
* Display a single statistic.
*/
static void
print_one_stat(uint64_t value, enum zfs_nicenum_format format,
unsigned int column_size, boolean_t scripted)
{
char buf[64];
zfs_nicenum_format(value, buf, sizeof (buf), format);
if (scripted)
printf("\t%s", buf);
else
printf(" %*s", column_size, buf);
}
/*
* Calculate the default vdev stats
*
* Subtract oldvs from newvs and save the resulting stats into calcvs. The
* scaling factor is applied later, when the values are printed.
*/
static void
calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
vdev_stat_t *calcvs)
{
int i;
memcpy(calcvs, newvs, sizeof (*calcvs));
for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
}
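/*
 * Worked example (hypothetical counters): if oldvs->vs_ops[ZIO_TYPE_READ]
 * is 100 and newvs->vs_ops[ZIO_TYPE_READ] is 160, then
 * calcvs->vs_ops[ZIO_TYPE_READ] becomes 60; the per-second scaling
 * factor is applied later, in print_iostat_default().
 */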
/*
* Internal representation of the extended iostats data.
*
* The extended iostat stats are exported in nvlists as either uint64_t arrays
* or single uint64_t's. We make both look like arrays to make them easier
* to process. To make a single uint64_t look like an array, we store the
* value in __data, point data at &__data, and set count = 1. Then we can
* just use data and count.
*/
struct stat_array {
uint64_t *data;
uint_t count; /* Number of entries in data[] */
uint64_t __data; /* Only used when data is a single uint64_t */
};
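/*
 * Minimal usage sketch (hypothetical values): wrapping a single counter
 * so it can be walked like an array:
 *
 *	struct stat_array sa;
 *	sa.__data = 42;
 *	sa.data = &sa.__data;
 *	sa.count = 1;
 *
 * sa.data[0] through sa.data[sa.count - 1] can then be iterated exactly
 * like a real uint64_t array, which is what nvpair64_to_stat_array()
 * below does for DATA_TYPE_UINT64 nvpairs.
 */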
static uint64_t
stat_histo_max(struct stat_array *nva, unsigned int len)
{
uint64_t max = 0;
int i;
for (i = 0; i < len; i++)
max = MAX(max, array64_max(nva[i].data, nva[i].count));
return (max);
}
/*
* Helper function to lookup a uint64_t array or uint64_t value and store its
* data as a stat_array. If the nvpair is a single uint64_t value, then we make
* it look like a one element array to make it easier to process.
*/
static int
nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
struct stat_array *nva)
{
nvpair_t *tmp;
int ret;
verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
switch (nvpair_type(tmp)) {
case DATA_TYPE_UINT64_ARRAY:
ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
break;
case DATA_TYPE_UINT64:
ret = nvpair_value_uint64(tmp, &nva->__data);
nva->data = &nva->__data;
nva->count = 1;
break;
default:
/* Not a uint64_t */
ret = EINVAL;
break;
}
return (ret);
}
/*
* Given a list of nvlist names, look up the extended stats in newnv and oldnv,
* subtract them, and return the results in a newly allocated stat_array.
* You must free the returned array with free_calc_stats() when you are
* done with it.
*
* Additionally, you can set "oldnv" to NULL if you simply want the newnv
* values.
*/
static struct stat_array *
calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
nvlist_t *newnv)
{
nvlist_t *oldnvx = NULL, *newnvx;
struct stat_array *oldnva, *newnva, *calcnva;
int i, j;
unsigned int alloc_size = (sizeof (struct stat_array)) * len;
/* Extract our extended stats nvlist from the main list */
verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&newnvx) == 0);
if (oldnv) {
verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&oldnvx) == 0);
}
newnva = safe_malloc(alloc_size);
oldnva = safe_malloc(alloc_size);
calcnva = safe_malloc(alloc_size);
for (j = 0; j < len; j++) {
verify(nvpair64_to_stat_array(newnvx, names[j],
&newnva[j]) == 0);
calcnva[j].count = newnva[j].count;
alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
calcnva[j].data = safe_malloc(alloc_size);
memcpy(calcnva[j].data, newnva[j].data, alloc_size);
if (oldnvx) {
verify(nvpair64_to_stat_array(oldnvx, names[j],
&oldnva[j]) == 0);
for (i = 0; i < oldnva[j].count; i++)
calcnva[j].data[i] -= oldnva[j].data[i];
}
}
free(newnva);
free(oldnva);
return (calcnva);
}
static void
free_calc_stats(struct stat_array *nva, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
free(nva[i].data);
free(nva);
}
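/*
 * Typical call pattern (a sketch; names, len, oldnv and newnv are
 * hypothetical locals): every stat_array allocated by
 * calc_and_alloc_stats_ex() must be released with free_calc_stats():
 *
 *	struct stat_array *nva;
 *	nva = calc_and_alloc_stats_ex(names, len, oldnv, newnv);
 *	... use nva[0 .. len - 1].data and .count ...
 *	free_calc_stats(nva, len);
 */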
static void
print_iostat_histo(struct stat_array *nva, unsigned int len,
iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
double scale)
{
int i, j;
char buf[6];
uint64_t val;
enum zfs_nicenum_format format;
unsigned int buckets;
unsigned int start_bucket;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
/* All these histos are the same size, so just use nva[0].count */
buckets = nva[0].count;
if (cb->cb_flags & IOS_RQ_HISTO_M) {
/* Start at 512 - req size should never be lower than this */
start_bucket = 9;
} else {
start_bucket = 0;
}
for (j = start_bucket; j < buckets; j++) {
/* Print histogram bucket label */
if (cb->cb_flags & IOS_L_HISTO_M) {
/* Ending range of this bucket */
val = (1UL << (j + 1)) - 1;
zfs_nicetime(val, buf, sizeof (buf));
} else {
/* Request size (starting range of bucket) */
val = (1UL << j);
zfs_nicenum(val, buf, sizeof (buf));
}
if (cb->cb_scripted)
printf("%llu", (u_longlong_t)val);
else
printf("%-*s", namewidth, buf);
/* Print the values on the line */
for (i = 0; i < len; i++) {
print_one_stat(nva[i].data[j] * scale, format,
column_width, cb->cb_scripted);
}
printf("\n");
}
}
static void
print_solid_separator(unsigned int length)
{
while (length--)
printf("-");
printf("\n");
}
static void
print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv, double scale, const char *name)
{
unsigned int column_width;
unsigned int namewidth;
unsigned int entire_width;
enum iostat_type type;
struct stat_array *nva;
const char **names;
unsigned int names_len;
/* What type of histo are we? */
type = IOS_HISTO_IDX(cb->cb_flags);
/* Get NULL-terminated array of nvlist names for our histo */
names = vsx_type_to_nvlist[type];
names_len = str_array_len(names); /* num of names */
nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
if (cb->cb_literal) {
column_width = MAX(5,
(unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
} else {
column_width = 5;
}
namewidth = MAX(cb->cb_namewidth,
strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
/*
* Calculate the entire line width of what we're printing. The
* +2 is for the two spaces between columns:
*/
/* read write */
/* ----- ----- */
/* |___| <---------- column_width */
/* */
/* |__________| <--- entire_width */
/* */
entire_width = namewidth + (column_width + 2) *
label_array_len(iostat_bottom_labels[type]);
if (cb->cb_scripted)
printf("%s\n", name);
else
print_iostat_header_impl(cb, column_width, name);
print_iostat_histo(nva, names_len, cb, column_width,
namewidth, scale);
free_calc_stats(nva, names_len);
if (!cb->cb_scripted)
print_solid_separator(entire_width);
}
/*
* Calculate the average latency of a power-of-two latency histogram
*/
static uint64_t
single_histo_average(uint64_t *histo, unsigned int buckets)
{
int i;
uint64_t count = 0, total = 0;
for (i = 0; i < buckets; i++) {
/*
* Our buckets are power-of-two latency ranges. Use the
* midpoint latency of each bucket to calculate the average.
* For example:
*
* Bucket Midpoint
* 8ns-15ns: 12ns
* 16ns-31ns: 24ns
* ...
*/
if (histo[i] != 0) {
total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
count += histo[i];
}
}
/* Prevent divide by zero */
return (count == 0 ? 0 : total / count);
}
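/*
 * Worked example (hypothetical histogram): with histo[3] = 2 (bucket
 * 8ns-15ns, midpoint 12ns) and histo[4] = 1 (bucket 16ns-31ns, midpoint
 * 24ns), the average is (2 * 12 + 1 * 24) / 3 = 16ns.
 */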
static void
print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv)
{
int i;
uint64_t val;
const char *names[] = {
ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_QUEUES);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
for (i = 0; i < ARRAY_SIZE(names); i++) {
val = nva[i].data[0];
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
static void
print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv)
{
int i;
uint64_t val;
const char *names[] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_LATENCY);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAWTIME;
else
format = ZFS_NICENUM_TIME;
/* Print our avg latencies on the line */
for (i = 0; i < ARRAY_SIZE(names); i++) {
/* Compute average latency for a latency histo */
val = single_histo_average(nva[i].data, nva[i].count);
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
/*
* Print default statistics (capacity/operations/bandwidth)
*/
static void
print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
{
unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
enum zfs_nicenum_format format;
char na; /* char to print for "not applicable" values */
if (cb->cb_literal) {
format = ZFS_NICENUM_RAW;
na = '0';
} else {
format = ZFS_NICENUM_1024;
na = '-';
}
/* only toplevel vdevs have capacity stats */
if (vs->vs_space == 0) {
if (cb->cb_scripted)
printf("\t%c\t%c", na, na);
else
printf(" %*c %*c", column_width, na, column_width,
na);
} else {
print_one_stat(vs->vs_alloc, format, column_width,
cb->cb_scripted);
print_one_stat(vs->vs_space - vs->vs_alloc, format,
column_width, cb->cb_scripted);
}
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
}
static const char *class_name[] = {
VDEV_ALLOC_BIAS_DEDUP,
VDEV_ALLOC_BIAS_SPECIAL,
VDEV_ALLOC_CLASS_LOGS
};
/*
* Print out all the statistics for the given vdev. This can either be the
* toplevel configuration, or called recursively. If 'name' is NULL, then this
* is a verbose output, and we don't want to display the toplevel pool stats.
*
* Returns the number of stat lines printed.
*/
static unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
nvlist_t **oldchild, **newchild;
uint_t c, children, oldchildren;
vdev_stat_t *oldvs, *newvs, *calcvs;
vdev_stat_t zerovs = { 0 };
char *vname;
int i;
int ret = 0;
uint64_t tdelta;
double scale;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return (ret);
calcvs = safe_malloc(sizeof (*calcvs));
if (oldnv != NULL) {
verify(nvlist_lookup_uint64_array(oldnv,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
} else {
oldvs = &zerovs;
}
/* Do we only want to see a specific vdev? */
for (i = 0; i < cb->cb_vdev_names_count; i++) {
/* Yes we do. Is this the vdev? */
if (strcmp(name, cb->cb_vdev_names[i]) == 0) {
/*
* This is our vdev. Since it is the only vdev we
* will be displaying, make depth = 0 so that it
* doesn't get indented.
*/
depth = 0;
break;
}
}
if (cb->cb_vdev_names_count && (i == cb->cb_vdev_names_count)) {
/* Couldn't match the name */
goto children;
}
verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&newvs, &c) == 0);
/*
* Print the vdev name unless it is a histogram. Histograms
* display the vdev name in the header itself.
*/
if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
if (cb->cb_scripted) {
printf("%s", name);
} else {
if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) -
depth), "");
}
}
/* Calculate our scaling factor */
tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
/*
* If we specify printing histograms with no time interval, then
* print the histogram numbers over the entire lifetime of the
* vdev.
*/
scale = 1;
} else {
if (tdelta == 0)
scale = 1.0;
else
scale = (double)NANOSEC / tdelta;
}
if (cb->cb_flags & IOS_DEFAULT_M) {
calc_default_iostats(oldvs, newvs, calcvs);
print_iostat_default(calcvs, cb, scale);
}
if (cb->cb_flags & IOS_LATENCY_M)
print_iostat_latency(cb, oldnv, newnv);
if (cb->cb_flags & IOS_QUEUES_M)
print_iostat_queues(cb, oldnv, newnv);
if (cb->cb_flags & IOS_ANYHISTO_M) {
printf("\n");
print_iostat_histos(cb, oldnv, newnv, scale, name);
}
if (cb->vcdl != NULL) {
char *path;
if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
&path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
if (!(cb->cb_flags & IOS_ANYHISTO_M))
printf("\n");
ret++;
children:
free(calcvs);
if (!cb->cb_verbose)
return (ret);
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
/*
* print normal top-level devices
*/
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE, islog = B_FALSE;
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
&islog);
if (ishole || islog)
continue;
if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
newchild[c], cb, depth + 2);
free(vname);
}
/*
* print all other top-level devices
*/
for (uint_t n = 0; n < 3; n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE;
char *bias = NULL;
char *type = NULL;
(void) nvlist_lookup_uint64(newchild[c],
ZPOOL_CONFIG_IS_LOG, &islog);
if (islog) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
!cb->cb_scripted && !cb->cb_vdev_names) {
print_iostat_dashes(cb, 0,
class_name[n]);
}
printf("\n");
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ?
oldchild[c] : NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
/*
* Include level 2 ARC devices in iostat output
*/
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
if (children > 0) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
!cb->cb_vdev_names) {
print_iostat_dashes(cb, 0, "cache");
}
printf("\n");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
: NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
return (ret);
}
static int
refresh_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
boolean_t missing;
/*
* If the pool has disappeared, remove it from the list and continue.
*/
if (zpool_refresh_stats(zhp, &missing) != 0)
return (-1);
if (missing)
pool_list_remove(cb->cb_list, zhp);
return (0);
}
/*
* Callback to print out the iostats for the given pool.
*/
static int
print_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
nvlist_t *oldconfig, *newconfig;
nvlist_t *oldnvroot, *newnvroot;
int ret;
newconfig = zpool_get_config(zhp, &oldconfig);
if (cb->cb_iteration == 1)
oldconfig = NULL;
verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
&newnvroot) == 0);
if (oldconfig == NULL)
oldnvroot = NULL;
else
verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
&oldnvroot) == 0);
ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
cb, 0);
if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
!cb->cb_scripted && cb->cb_verbose && !cb->cb_vdev_names_count) {
print_iostat_separator(cb);
if (cb->vcdl != NULL) {
print_cmd_columns(cb->vcdl, 1);
}
printf("\n");
}
return (ret);
}
static int
get_columns(void)
{
struct winsize ws;
int columns = 80;
int error;
if (isatty(STDOUT_FILENO)) {
error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
if (error == 0)
columns = ws.ws_col;
} else {
columns = 999;
}
return (columns);
}
/*
* Return the required length of the pool/vdev name column. The minimum
* allowed width and output formatting flags must be provided.
*/
static int
get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
{
nvlist_t *config, *nvroot;
int width = min_width;
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
unsigned int poolname_len = strlen(zpool_get_name(zhp));
if (verbose == B_FALSE) {
width = MAX(poolname_len, min_width);
} else {
width = MAX(poolname_len,
max_width(zhp, nvroot, 0, min_width, flags));
}
}
return (width);
}
/*
* Parse the input arguments and get the 'interval' and 'count' values, if present.
*/
static void
get_interval_count(int *argcp, char **argv, float *iv,
unsigned long *cnt)
{
float interval = 0;
unsigned long count = 0;
int argc = *argcp;
/*
* Determine if the last argument is an integer or a pool name
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
/*
* If this is not a valid number, just plow on. The
* user will get a more informative error message later
* on.
*/
interval = 0;
}
}
/*
* If the last argument is also an integer, then we have both a count
* and an interval.
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
count = interval;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
interval = 0;
}
}
*iv = interval;
*cnt = count;
*argcp = argc;
}
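/*
 * Worked example (hypothetical arguments): for "zpool iostat tank 2 5"
 * the remaining argv is { "tank", "2", "5" }. The first pass parses "5"
 * as the interval; the second pass sees that "2" is also numeric, moves
 * the 5 into count and re-parses the interval as 2. The caller is left
 * with interval = 2.0, count = 5, and argc reduced to just "tank".
 */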
static void
get_timestamp_arg(char c)
{
if (c == 'u')
timestamp_fmt = UDATE;
else if (c == 'd')
timestamp_fmt = DDATE;
else
usage(B_FALSE);
}
/*
* Return stat flags that are supported on all pools by both the module and
* zpool iostat. "*data" should be initialized to all 0xFFs before running.
* It will get ANDed down until only the flags that are supported on all pools
* remain.
*/
static int
get_stat_flags_cb(zpool_handle_t *zhp, void *data)
{
uint64_t *mask = data;
nvlist_t *config, *nvroot, *nvx;
uint64_t flags = 0;
int i, j;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
/* Default stats are always supported, but for completeness... */
if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
flags |= IOS_DEFAULT_M;
/* Get our extended stats nvlist from the main list */
if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
&nvx) != 0) {
/*
* No extended stats; they're probably running an older
* module. No big deal, we support that too.
*/
goto end;
}
/* For each extended stat, make sure all its nvpairs are supported */
for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
if (!vsx_type_to_nvlist[j][0])
continue;
/* Start off by assuming the flag is supported, then check */
flags |= (1ULL << j);
for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
/* flag isn't supported */
flags = flags & ~(1ULL << j);
break;
}
}
}
end:
*mask = *mask & flags;
return (0);
}
/*
* Return a bitmask of stats that are supported on all pools by both the module
* and zpool iostat.
*/
static uint64_t
get_stat_flags(zpool_list_t *list)
{
uint64_t mask = -1;
/*
* get_stat_flags_cb() will lop off bits from "mask" until only the
* flags that are supported on all pools remain.
*/
pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
return (mask);
}
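/*
 * Masking sketch (hypothetical pools): if one pool supports
 * IOS_DEFAULT_M | IOS_LATENCY_M | IOS_QUEUES_M but another only exports
 * IOS_DEFAULT_M, the successive callbacks AND the mask down to
 * IOS_DEFAULT_M, so only the stats common to every pool are reported as
 * supported.
 */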
/*
* Return 1 if cb_data->cb_vdev_names[0] is this vdev's name, 0 otherwise.
*/
static int
is_vdev_cb(zpool_handle_t *zhp, nvlist_t *nv, void *cb_data)
{
iostat_cbdata_t *cb = cb_data;
char *name = NULL;
int ret = 0;
name = zpool_vdev_name(g_zfs, zhp, nv, cb->cb_name_flags);
if (strcmp(name, cb->cb_vdev_names[0]) == 0)
ret = 1; /* match */
free(name);
return (ret);
}
/*
* Returns 1 if cb_data->cb_vdev_names[0] is a vdev name, 0 otherwise.
*/
static int
is_vdev(zpool_handle_t *zhp, void *cb_data)
{
return (for_each_vdev(zhp, is_vdev_cb, cb_data));
}
/*
* Check if vdevs are in a pool
*
* Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
* return 0. If pool_name is NULL, then search all pools.
*/
static int
are_vdevs_in_pool(int argc, char **argv, char *pool_name,
iostat_cbdata_t *cb)
{
char **tmp_name;
int ret = 0;
int i;
int pool_count = 0;
if ((argc == 0) || !*argv)
return (0);
if (pool_name)
pool_count = 1;
/* Temporarily hijack cb_vdev_names for a second... */
tmp_name = cb->cb_vdev_names;
/* Go through our list of prospective vdev names */
for (i = 0; i < argc; i++) {
cb->cb_vdev_names = argv + i;
/* Is this name a vdev in our pools? */
ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
B_FALSE, is_vdev, cb);
if (!ret) {
/* No match */
break;
}
}
cb->cb_vdev_names = tmp_name;
return (ret);
}
static int
is_pool_cb(zpool_handle_t *zhp, void *data)
{
char *name = data;
if (strcmp(name, zpool_get_name(zhp)) == 0)
return (1);
return (0);
}
/*
* Do we have a pool named *name? If so, return 1, otherwise 0.
*/
static int
is_pool(char *name)
{
return (for_each_pool(0, NULL, B_TRUE, NULL, B_FALSE, is_pool_cb,
name));
}
/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
static int
are_all_pools(int argc, char **argv)
{
if ((argc == 0) || !*argv)
return (0);
while (--argc >= 0)
if (!is_pool(argv[argc]))
return (0);
return (1);
}
/*
* Helper function to print out vdev/pool names we can't resolve. Used for an
* error message.
*/
static void
error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
iostat_cbdata_t *cb)
{
int i;
char *name;
char *str;
for (i = 0; i < argc; i++) {
name = argv[i];
if (is_pool(name))
str = gettext("pool");
else if (are_vdevs_in_pool(1, &name, pool_name, cb))
str = gettext("vdev in this pool");
else if (are_vdevs_in_pool(1, &name, NULL, cb))
str = gettext("vdev in another pool");
else
str = gettext("unknown");
fprintf(stderr, "\t%s (%s)\n", name, str);
}
}
/*
* Same as get_interval_count(), but with additional checks to not misinterpret
* guids as interval/count values. Assumes VDEV_NAME_GUID is set in
* cb.cb_name_flags.
*/
static void
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
unsigned long *count, iostat_cbdata_t *cb)
{
char **tmpargv = argv;
int argc_for_interval = 0;
/* Is the last arg an interval value? Or a guid? */
if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL, cb)) {
/*
* The last arg is not a guid, so it's probably an
* interval value.
*/
argc_for_interval++;
if (*argc >= 2 &&
!are_vdevs_in_pool(1, &argv[*argc - 2], NULL, cb)) {
/*
* The 2nd to last arg is not a guid, so it's probably
* an interval value.
*/
argc_for_interval++;
}
}
/* Point to our list of possible intervals */
tmpargv = &argv[*argc - argc_for_interval];
*argc = *argc - argc_for_interval;
get_interval_count(&argc_for_interval, tmpargv,
interval, count);
}
/*
* Floating point sleep(). Allows you to pass in a floating point value for
* seconds.
*/
static void
fsleep(float sec)
{
struct timespec req;
req.tv_sec = floor(sec);
req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
nanosleep(&req, NULL);
}
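/*
 * Example (hypothetical value): fsleep(1.5) yields req.tv_sec = 1 and
 * req.tv_nsec = 500000000, i.e. a 1.5 second nanosleep().
 */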
/*
* Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
* if we were unable to determine its size.
*/
static int
terminal_height(void)
{
struct winsize win;
if (isatty(STDOUT_FILENO) == 0)
return (-1);
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
return (win.ws_row);
return (-1);
}
/*
* Run one of the zpool status/iostat -c scripts with the help (-h) option and
* print the result.
*
* name: Short name of the script ('iostat').
* path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat');
*/
static void
print_zpool_script_help(char *name, char *path)
{
char *argv[] = {path, "-h", NULL};
char **lines = NULL;
int lines_cnt = 0;
int rc;
rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
&lines_cnt);
if (rc != 0 || lines == NULL || lines_cnt <= 0) {
if (lines != NULL)
libzfs_free_str_array(lines, lines_cnt);
return;
}
for (int i = 0; i < lines_cnt; i++)
if (!is_blank_str(lines[i]))
printf(" %-14s %s\n", name, lines[i]);
libzfs_free_str_array(lines, lines_cnt);
}
/*
* Go through the zpool status/iostat -c scripts in the user's path, run their
* help option (-h), and print out the results.
*/
static void
print_zpool_dir_scripts(char *dirpath)
{
DIR *dir;
struct dirent *ent;
char fullpath[MAXPATHLEN];
struct stat dir_stat;
if ((dir = opendir(dirpath)) != NULL) {
/* Walk the directory entries and print help for each executable script */
while ((ent = readdir(dir)) != NULL) {
sprintf(fullpath, "%s/%s", dirpath, ent->d_name);
/* Print the scripts */
if (stat(fullpath, &dir_stat) == 0)
if (dir_stat.st_mode & S_IXUSR &&
S_ISREG(dir_stat.st_mode))
print_zpool_script_help(ent->d_name,
fullpath);
}
closedir(dir);
}
}
/*
* Print out help text for all zpool status/iostat -c scripts.
*/
static void
print_zpool_script_list(char *subcommand)
{
char *dir, *sp, *tmp;
printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
sp = zpool_get_cmd_search_path();
if (sp == NULL)
return;
for (dir = strtok_r(sp, ":", &tmp);
dir != NULL;
dir = strtok_r(NULL, ":", &tmp))
print_zpool_dir_scripts(dir);
free(sp);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 10,
* but may be as large as the terminal width - 42 so it still fits on one line.
* NOTE: 42 is the width of the default capacity/operations/bandwidth output
*/
static int
get_namewidth_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
int width, available_width;
/*
* get_namewidth() returns the maximum width of any name in that column
* for any pool/vdev/device line that will be output.
*/
width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_name_flags,
cb->cb_verbose);
/*
* The width we are calculating is the width of the header and also the
* padding width for names that are less than maximum width. The stats
* take up 42 characters, so the width available for names is:
*/
available_width = get_columns() - 42;
/*
* If the maximum width fits on a screen, then great! Make everything
* line up by justifying all lines to the same width. If that max
* width is larger than what's available, the name plus stats won't fit
* on one line, and justifying to that width would cause every line to
* wrap on the screen. We only want lines with long names to wrap.
* Limit the padding to what won't wrap.
*/
if (width > available_width)
width = available_width;
/*
* And regardless of whatever the screen width is (get_columns can
* return 0 if the width is not known or less than 42 for a narrow
* terminal) have the width be a minimum of 10.
*/
if (width < 10)
width = 10;
/* Save the calculated width */
cb->cb_namewidth = width;
return (0);
}
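/*
 * Worked example (hypothetical 80-column terminal): get_columns()
 * returns 80, so available_width is 80 - 42 = 38. A longest name of 45
 * characters is capped to 38, while anything computed below 10 is
 * raised to the 10-character minimum.
 */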
/*
* zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
* [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
* -v Display statistics for individual vdevs
* -h Display help
* -p Display values in parsable (exact) format.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -l Display average latency
* -q Display queue depths
* -w Display latency histograms
* -r Display request size histogram
* -T Display a timestamp in date(1) or Unix format
* -n Only print headers once
*
* This command can be tricky because we want to be able to deal with pool
* creation/destruction as well as vdev configuration changes. The bulk of this
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
* on pool_list_update() to detect the addition of new pools. Configuration
* changes are all handled within libzfs.
*/
int
zpool_do_iostat(int argc, char **argv)
{
int c;
int ret;
int npools;
float interval = 0;
unsigned long count = 0;
int winheight = 24;
zpool_list_t *list;
boolean_t verbose = B_FALSE;
boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
boolean_t omit_since_boot = B_FALSE;
boolean_t guid = B_FALSE;
boolean_t follow_links = B_FALSE;
boolean_t full_name = B_FALSE;
boolean_t headers_once = B_FALSE;
iostat_cbdata_t cb = { 0 };
char *cmd = NULL;
/* Used for printing error message */
const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
[IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
uint64_t unsupported_flags;
/* check options */
while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
verbose = B_TRUE;
break;
case 'g':
guid = B_TRUE;
break;
case 'L':
follow_links = B_TRUE;
break;
case 'P':
full_name = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
verbose = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'l':
latency = B_TRUE;
break;
case 'q':
queues = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'w':
l_histo = B_TRUE;
break;
case 'r':
rq_histo = B_TRUE;
break;
case 'y':
omit_since_boot = B_TRUE;
break;
case 'n':
headers_once = B_TRUE;
break;
case 'h':
usage(B_FALSE);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("iostat");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
cb.cb_literal = parsable;
cb.cb_scripted = scripted;
if (guid)
cb.cb_name_flags |= VDEV_NAME_GUID;
if (follow_links)
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (full_name)
cb.cb_name_flags |= VDEV_NAME_PATH;
cb.cb_iteration = 0;
cb.cb_namewidth = 0;
cb.cb_verbose = verbose;
/* Get our interval and count values (if any) */
if (guid) {
get_interval_count_filter_guids(&argc, argv, &interval,
&count, &cb);
} else {
get_interval_count(&argc, argv, &interval, &count);
}
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_vdevs_in_pool(argc, argv, NULL, &cb)) {
/* All the args are vdevs */
cb.cb_vdev_names = argv;
cb.cb_vdev_names_count = argc;
argc = 0; /* No pools to process */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], &cb)) {
/* ...and the rest are vdev names */
cb.cb_vdev_names = argv + 1;
cb.cb_vdev_names_count = argc - 1;
argc = 1; /* One pool to process */
} else {
fprintf(stderr, gettext("Expected either a list of "));
fprintf(stderr, gettext("pools, or list of vdevs in"));
fprintf(stderr, " \"%s\", ", argv[0]);
fprintf(stderr, gettext("but got:\n"));
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
/*
* The args don't make sense. The first arg isn't a pool name,
* nor are all the args vdevs.
*/
fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
fprintf(stderr, "\n");
return (1);
}
if (cb.cb_vdev_names_count != 0) {
/*
* If user specified vdevs, it implies verbose.
*/
cb.cb_verbose = B_TRUE;
}
/*
* Construct the list of all interesting pools.
*/
ret = 0;
if ((list = pool_list_get(argc, argv, NULL, parsable, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0 && argc != 0) {
pool_list_free(list);
return (1);
}
if (pool_list_count(list) == 0 && interval == 0) {
pool_list_free(list);
(void) fprintf(stderr, gettext("no pools available\n"));
return (1);
}
if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
usage(B_FALSE);
return (1);
}
if (l_histo && rq_histo) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("Only one of [-r|-w] can be passed at a time\n"));
usage(B_FALSE);
return (1);
}
/*
* Enter the main iostat loop.
*/
cb.cb_list = list;
if (l_histo) {
/*
* Histogram tables look out of place when you try to display
* them with the other stats, so make a rule that you can only
* print histograms by themselves.
*/
cb.cb_flags = IOS_L_HISTO_M;
} else if (rq_histo) {
cb.cb_flags = IOS_RQ_HISTO_M;
} else {
cb.cb_flags = IOS_DEFAULT_M;
if (latency)
cb.cb_flags |= IOS_LATENCY_M;
if (queues)
cb.cb_flags |= IOS_QUEUES_M;
}
/*
* See if the module supports all the stats we want to display.
*/
unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
if (unsupported_flags) {
uint64_t f;
int idx;
fprintf(stderr,
gettext("The loaded zfs module doesn't support:"));
/* for each bit set in unsupported_flags */
for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
fprintf(stderr, " -%c", flag_to_arg[idx]);
}
fprintf(stderr, ". Try running a newer module.\n");
pool_list_free(list);
return (1);
}
for (;;) {
if ((npools = pool_list_count(list)) == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else {
/*
* If this is the first iteration and -y was supplied
* we skip any printing.
*/
boolean_t skip = (omit_since_boot &&
cb.cb_iteration == 0);
/*
* Refresh all statistics. This is done as an
* explicit step before calculating the maximum name
* width, so that any configuration changes are
* properly accounted for.
*/
(void) pool_list_iter(list, B_FALSE, refresh_iostat,
&cb);
/*
* Iterate over all pools to determine the maximum width
* for the pool / device name column across all pools.
*/
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE,
get_namewidth_iostat, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL && cb.cb_verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) {
cb.vcdl = all_pools_for_each_vdev_run(argc,
argv, cmd, g_zfs, cb.cb_vdev_names,
cb.cb_vdev_names_count, cb.cb_name_flags);
} else {
cb.vcdl = NULL;
}
/*
* Check the terminal size so we can reprint
* the headers when the terminal window height
* changes.
*/
winheight = terminal_height();
/*
* Are we connected to a TTY? If not, headers_once
* should be true, to avoid breaking scripts.
*/
if (winheight < 0)
headers_once = B_TRUE;
/*
* If this is the first iteration and we're not skipping it,
* or if exactly one of skip and verbose is set, print the header.
*
* The histogram code explicitly prints its header on
* every vdev, so skip this for histograms.
*/
if (((++cb.cb_iteration == 1 && !skip) ||
(skip != verbose) ||
(!headers_once &&
(cb.cb_iteration % winheight) == 0)) &&
(!(cb.cb_flags & IOS_ANYHISTO_M)) &&
!cb.cb_scripted)
print_iostat_header(&cb);
if (skip) {
(void) fsleep(interval);
continue;
}
pool_list_iter(list, B_FALSE, print_iostat, &cb);
/*
* If there's more than one pool, and we're not in
* verbose mode (which prints a separator for us),
* then print a separator.
*
* In addition, if we're printing specific vdevs then
* we also want an ending separator.
*/
if (((npools > 1 && !verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) ||
(!(cb.cb_flags & IOS_ANYHISTO_M) &&
cb.cb_vdev_names_count)) &&
!cb.cb_scripted) {
print_iostat_separator(&cb);
if (cb.vcdl != NULL)
print_cmd_columns(cb.vcdl, 1);
printf("\n");
}
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
}
/*
* Flush the output so that redirection to a file isn't buffered
* indefinitely.
*/
(void) fflush(stdout);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
pool_list_free(list);
return (ret);
}
typedef struct list_cbdata {
boolean_t cb_verbose;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
boolean_t cb_literal;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZPOOL_MAXPROPLEN];
const char *header;
boolean_t first = B_TRUE;
boolean_t right_justify;
size_t width = 0;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first)
(void) printf(" ");
else
first = B_FALSE;
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_INVAL) {
header = zpool_prop_column_name(pl->pl_prop);
right_justify = zpool_prop_align_right(pl->pl_prop);
} else {
int i;
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) printf("%s", header);
else if (right_justify)
(void) printf("%*s", (int)width, header);
else
(void) printf("%-*s", (int)width, header);
}
(void) printf("\n");
}
/*
* Given a pool and a list of properties, print out all the properties according
* to the described layout. Used by zpool_do_list().
*/
static void
print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZPOOL_MAXPROPLEN];
char *propstr;
boolean_t right_justify;
size_t width;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first) {
if (cb->cb_scripted)
(void) printf("\t");
else
(void) printf(" ");
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_INVAL) {
if (zpool_get_prop(zhp, pl->pl_prop, property,
sizeof (property), NULL, cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zpool_prop_align_right(pl->pl_prop);
} else if ((zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop)) &&
zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
sizeof (property)) == 0) {
propstr = property;
} else {
propstr = "-";
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) printf("%s", propstr);
else if (right_justify)
(void) printf("%*s", (int)width, propstr);
else
(void) printf("%-*s", (int)width, propstr);
}
(void) printf("\n");
}
static void
print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
{
char propval[64];
boolean_t fixed;
size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
switch (prop) {
case ZPOOL_PROP_EXPANDSZ:
case ZPOOL_PROP_CHECKPOINT:
case ZPOOL_PROP_DEDUPRATIO:
if (value == 0)
(void) strlcpy(propval, "-", sizeof (propval));
else
zfs_nicenum_format(value, propval, sizeof (propval),
format);
break;
case ZPOOL_PROP_FRAGMENTATION:
if (value == ZFS_FRAG_INVALID) {
(void) strlcpy(propval, "-", sizeof (propval));
} else if (format == ZFS_NICENUM_RAW) {
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value);
} else {
(void) snprintf(propval, sizeof (propval), "%llu%%",
(unsigned long long)value);
}
break;
case ZPOOL_PROP_CAPACITY:
/* capacity value is in parts-per-10,000 (aka permyriad) */
if (format == ZFS_NICENUM_RAW)
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value / 100);
else
(void) snprintf(propval, sizeof (propval),
value < 1000 ? "%1.2f%%" : value < 10000 ?
"%2.1f%%" : "%3.0f%%", value / 100.0);
break;
case ZPOOL_PROP_HEALTH:
width = 8;
(void) strlcpy(propval, str, sizeof (propval));
break;
default:
zfs_nicenum_format(value, propval, sizeof (propval), format);
}
if (!valid)
(void) strlcpy(propval, "-", sizeof (propval));
if (scripted)
(void) printf("\t%s", propval);
else
(void) printf(" %*s", (int)width, propval);
}
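/*
 * Worked example (hypothetical value): for ZPOOL_PROP_CAPACITY a raw
 * value of 4567 permyriad prints as "45" under ZFS_NICENUM_RAW
 * (4567 / 100) and as "45.7%" otherwise (4567 / 100.0 with the
 * "%2.1f%%" format chosen for values in [1000, 10000)).
 */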
/*
* Print the static default stats line for each vdev;
* not compatible with the '-o' <proplist> option.
*/
static void
print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
list_cbdata_t *cb, int depth, boolean_t isspare)
{
nvlist_t **child;
vdev_stat_t *vs;
uint_t c, children;
char *vname;
boolean_t scripted = cb->cb_scripted;
uint64_t islog = B_FALSE;
char *dashes = "%-*s - - - - "
"- - - - -\n";
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (name != NULL) {
boolean_t toplevel = (vs->vs_space != 0);
uint64_t cap;
enum zfs_nicenum_format format;
const char *state;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return;
if (scripted)
(void) printf("\t%s", name);
else if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) - depth), "");
/*
* Print the properties for the individual vdevs. Some
* properties are only applicable to toplevel vdevs. The
* 'toplevel' boolean value is passed to print_one_column()
* to indicate that the value is valid.
*/
print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL, scripted,
toplevel, format);
print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
NULL, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_CHECKPOINT,
vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
scripted, B_TRUE, format);
print_one_column(ZPOOL_PROP_FRAGMENTATION,
vs->vs_fragmentation, NULL, scripted,
(vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
format);
cap = (vs->vs_space == 0) ? 0 :
(vs->vs_alloc * 10000 / vs->vs_space);
print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
scripted, toplevel, format);
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
if (vs->vs_aux == VDEV_AUX_SPARED)
state = "INUSE";
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = "AVAIL";
}
print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
B_TRUE, format);
(void) printf("\n");
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
/* list the normal vdevs first */
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
continue;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
free(vname);
}
/* list the classes: 'logs', 'dedup', and 'special' */
for (uint_t n = 0; n < 3; n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
char *bias = NULL;
char *type = NULL;
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog) == 0 && islog) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth,
class_name[n]);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "cache");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
&children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "spare");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_TRUE);
free(vname);
}
}
}
/*
* Generic callback function to list a pool.
*/
static int
list_callback(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
print_pool(zhp, cbp);
if (cbp->cb_verbose) {
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
}
return (0);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 9,
* but may be as large as needed.
*/
static int
get_namewidth_list(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cb = data;
int width;
width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_name_flags,
cb->cb_verbose);
if (width < 9)
width = 9;
cb->cb_namewidth = width;
return (0);
}
/*
* zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
*
* -g Display guid for individual vdev name.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -L Follow links when resolving vdev path name.
* -o List of properties to display. Defaults to
* "name,size,allocated,free,expandsize,fragmentation,capacity,"
* "dedupratio,health,altroot"
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -T Display a timestamp in date(1) or Unix format
*
* List all pools in the system, whether or not they're healthy. Output space
* statistics for each one, as well as a health status summary.
*/
int
zpool_do_list(int argc, char **argv)
{
int c;
int ret = 0;
list_cbdata_t cb = { 0 };
static char default_props[] =
"name,size,allocated,free,checkpoint,expandsize,fragmentation,"
"capacity,dedupratio,health,altroot";
char *props = default_props;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
boolean_t first = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
switch (c) {
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'o':
props = optarg;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
cb.cb_verbose = B_TRUE;
cb.cb_namewidth = 8; /* 8 until precalc is avail */
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
usage(B_FALSE);
for (;;) {
if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
cb.cb_literal, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0)
break;
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (!cb.cb_scripted && (first || cb.cb_verbose)) {
print_header(&cb);
first = B_FALSE;
}
ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
pool_list_free(list);
(void) fsleep(interval);
}
if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
(void) printf(gettext("no pools available\n"));
ret = 0;
}
pool_list_free(list);
zprop_free_list(cb.cb_proplist);
return (ret);
}
static int
zpool_do_attach_or_replace(int argc, char **argv, int replacing)
{
boolean_t force = B_FALSE;
boolean_t rebuild = B_FALSE;
boolean_t wait = B_FALSE;
int c;
nvlist_t *nvroot;
char *poolname, *old_disk, *new_disk;
zpool_handle_t *zhp;
nvlist_t *props = NULL;
char *propval;
int ret;
/* check options */
while ((c = getopt(argc, argv, "fo:sw")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 's':
rebuild = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
old_disk = argv[1];
if (argc < 3) {
if (!replacing) {
(void) fprintf(stderr,
gettext("missing <new_device> specification\n"));
usage(B_FALSE);
}
new_disk = old_disk;
argc -= 1;
argv += 1;
} else {
new_disk = argv[2];
argc -= 2;
argv += 2;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
nvlist_free(props);
return (1);
}
if (zpool_get_config(zhp, NULL) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
nvlist_free(props);
return (1);
}
/* Unless manually specified, use the "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
nvlist_free(props);
return (1);
}
ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
rebuild);
if (ret == 0 && wait)
ret = zpool_wait(zhp,
replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER);
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for replacing to complete before returning
*
* Replace <device> with <new_device>.
*/
/* ARGSUSED */
int
zpool_do_replace(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
}
/*
* zpool attach [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for resilvering to complete before returning
*
* Attach <new_device> to the mirror containing <device>. If <device> is not
* part of a mirror, then <device> will be transformed into a mirror of
* <device> and <new_device>. In either case, <new_device> will begin life
* with a DTL of [0, now], and will immediately begin to resilver itself.
*/
int
zpool_do_attach(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
}
/*
* zpool detach [-f] <pool> <device>
*
* -f Force detach of <device>, even if DTLs argue against it
* (not supported yet)
*
* Detach a device from a mirror. The operation will be refused if <device>
* is the last device in the mirror, or if the DTLs indicate that this device
* has the only valid copy of some data.
*/
/* ARGSUSED */
int
zpool_do_detach(int argc, char **argv)
{
int c;
char *poolname, *path;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
path = argv[1];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_vdev_detach(zhp, path);
zpool_close(zhp);
return (ret);
}
/*
* zpool split [-gLnP] [-o prop=val] ...
* [-o mntopt] ...
* [-R altroot] <pool> <newpool> [<device> ...]
*
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not split the pool, but display the resulting layout if
* it were to be split.
* -o Set property=value, or set mount options.
* -P Display full path for vdev name.
* -R Mount the split-off pool under an alternate root.
* -l Load encryption keys while importing.
*
* Splits the named pool and gives it the new pool name. Devices to be split
* off may be listed, provided that no more than one device is specified
* per top-level vdev mirror. The newly split pool is left in an exported
* state unless -R is specified.
*
* Restrictions: the top level of the pool must only be made up of
* mirrors; all devices in the pool must be healthy; no device may be
* undergoing a resilvering operation.
*/
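/*
* Example invocations ('tank', 'newtank', and '/mnt' are placeholders):
*
*   zpool split -n tank newtank       # preview the layout without splitting
*   zpool split -R /mnt tank newtank  # split and import under /mnt
*/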
int
zpool_do_split(int argc, char **argv)
{
char *srcpool, *newpool, *propval;
char *mntopts = NULL;
splitflags_t flags;
int c, ret = 0;
boolean_t loadkeys = B_FALSE;
zpool_handle_t *zhp;
nvlist_t *config, *props = NULL;
flags.dryrun = B_FALSE;
flags.import = B_FALSE;
flags.name_flags = 0;
/* check options */
while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
switch (c) {
case 'g':
flags.name_flags |= VDEV_NAME_GUID;
break;
case 'L':
flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'R':
flags.import = B_TRUE;
if (add_prop_list(
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'l':
loadkeys = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
} else {
mntopts = optarg;
}
break;
case 'P':
flags.name_flags |= VDEV_NAME_PATH;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
break;
}
}
if (!flags.import && mntopts != NULL) {
(void) fprintf(stderr, gettext("setting mntopts is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
if (!flags.import && loadkeys) {
(void) fprintf(stderr, gettext("loading keys is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("Missing new pool name\n"));
usage(B_FALSE);
}
srcpool = argv[0];
newpool = argv[1];
argc -= 2;
argv += 2;
if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
nvlist_free(props);
return (1);
}
config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
if (config == NULL) {
ret = 1;
} else {
if (flags.dryrun) {
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), newpool);
print_vdev_tree(NULL, newpool, config, 0, "",
flags.name_flags);
print_vdev_tree(NULL, "dedup", config, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", config, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
}
}
zpool_close(zhp);
if (ret != 0 || flags.dryrun || !flags.import) {
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* The split was successful. Now we need to open the new
* pool and import it.
*/
if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
nvlist_free(config);
nvlist_free(props);
return (1);
}
if (loadkeys) {
ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
if (ret != 0)
ret = 1;
}
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
zpool_enable_datasets(zhp, mntopts, 0) != 0) {
ret = 1;
(void) fprintf(stderr, gettext("Split was successful, but "
"the datasets could not all be mounted\n"));
(void) fprintf(stderr, gettext("Try doing '%s' with a "
"different altroot\n"), "zpool import");
}
zpool_close(zhp);
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* zpool online [-e] <pool> <device> ...
*
* -e Expand the device to use all available space.
*/
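/*
* Illustrative invocations ('tank' and 'sda' are placeholder names):
*
*   zpool online tank sda           # bring sda back online
*   zpool online -e tank sda        # also expand it to its full size
*/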
int
zpool_do_online(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
vdev_state_t newstate;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, "e")) != -1) {
switch (c) {
case 'e':
flags |= ZFS_ONLINE_EXPAND;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
if (newstate != VDEV_STATE_HEALTHY) {
(void) printf(gettext("warning: device '%s' "
"onlined, but remains in faulted state\n"),
argv[i]);
if (newstate == VDEV_STATE_FAULTED)
(void) printf(gettext("use 'zpool "
"clear' to restore a faulted "
"device\n"));
else
(void) printf(gettext("use 'zpool "
"replace' to replace devices "
"that are no longer present\n"));
}
} else {
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool offline [-ft] <pool> <device> ...
*
* -f Force the device into a faulted state.
*
* -t Only take the device off-line temporarily. The offline/faulted
* state will not be persistent across reboots.
*/
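/*
* Illustrative invocations ('tank' and 'sda' are placeholder names):
*
*   zpool offline tank sda          # persistently offline sda
*   zpool offline -t tank sda       # temporary; not persistent across reboots
*/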
/* ARGSUSED */
int
zpool_do_offline(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
boolean_t istmp = B_FALSE;
boolean_t fault = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "ft")) != -1) {
switch (c) {
case 'f':
fault = B_TRUE;
break;
case 't':
istmp = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
if (fault) {
uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
vdev_aux_t aux;
if (istmp == B_FALSE) {
/* Force the fault to persist across imports */
aux = VDEV_AUX_EXTERNAL_PERSIST;
} else {
aux = VDEV_AUX_EXTERNAL;
}
if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
ret = 1;
} else {
if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool clear <pool> [device]
*
* Clear all errors associated with a pool or a particular device.
*/
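/*
* Example invocations ('tank' and 'sda' are placeholder names):
*
*   zpool clear tank                # clear errors on the whole pool
*   zpool clear tank sda            # clear errors on a single device
*/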
int
zpool_do_clear(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
nvlist_t *policy = NULL;
zpool_handle_t *zhp;
char *pool, *device;
/* check options */
while ((c = getopt(argc, argv, "FnX")) != -1) {
switch (c) {
case 'F':
do_rewind = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In future, further rewind policy choices can be passed along here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0) {
return (1);
}
pool = argv[0];
device = argc == 2 ? argv[1] : NULL;
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
nvlist_free(policy);
return (1);
}
if (zpool_clear(zhp, device, policy) != 0)
ret = 1;
zpool_close(zhp);
nvlist_free(policy);
return (ret);
}
/*
* zpool reguid <pool>
*/
int
zpool_do_reguid(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_reguid(zhp);
zpool_close(zhp);
return (ret);
}
/*
* zpool reopen [-n] <pool>
*
* -n Do not restart an in-progress scrub operation.
*
* Reopen the pool so that the kernel can update the sizes of all vdevs.
*/
int
zpool_do_reopen(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t scrub_restart = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "n")) != -1) {
switch (c) {
case 'n':
scrub_restart = B_FALSE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_reopen_one on all pools */
ret = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, zpool_reopen_one,
&scrub_restart);
return (ret);
}
typedef struct scrub_cbdata {
int cb_type;
pool_scrub_cmd_t cb_scrub_cmd;
} scrub_cbdata_t;
static boolean_t
zpool_has_checkpoint(zpool_handle_t *zhp)
{
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
if (config != NULL) {
pool_checkpoint_stat_t *pcs = NULL;
uint_t c;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return (B_FALSE);
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
return (B_TRUE);
}
return (B_FALSE);
}
static int
scrub_callback(zpool_handle_t *zhp, void *data)
{
scrub_cbdata_t *cb = data;
int err;
/*
* Ignore faulted pools.
*/
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
(void) fprintf(stderr, gettext("cannot scan '%s': pool is "
"currently unavailable\n"), zpool_get_name(zhp));
return (1);
}
err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
if (err == 0 && zpool_has_checkpoint(zhp) &&
cb->cb_type == POOL_SCAN_SCRUB) {
(void) printf(gettext("warning: will not scrub state that "
"belongs to the checkpoint of pool '%s'\n"),
zpool_get_name(zhp));
}
return (err != 0);
}
static int
wait_callback(zpool_handle_t *zhp, void *data)
{
zpool_wait_activity_t *act = data;
return (zpool_wait(zhp, *act));
}
/*
* zpool scrub [-s | -p] [-w] <pool> ...
*
* -s Stop. Stops any in-progress scrub.
* -p Pause. Pauses an in-progress scrub.
* -w Wait. Blocks until scrub has completed.
*/
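/*
* Illustrative invocations ('tank' is a placeholder pool name):
*
*   zpool scrub tank                # start (or resume) a scrub
*   zpool scrub -p tank             # pause an in-progress scrub
*   zpool scrub -w tank             # start a scrub and block until done
*   zpool scrub -s tank             # stop an in-progress scrub
*/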
int
zpool_do_scrub(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
boolean_t wait = B_FALSE;
int error;
cb.cb_type = POOL_SCAN_SCRUB;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "spw")) != -1) {
switch (c) {
case 's':
cb.cb_type = POOL_SCAN_NONE;
break;
case 'p':
cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (cb.cb_type == POOL_SCAN_NONE &&
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-s and -p are mutually exclusive\n"));
usage(B_FALSE);
}
if (wait && (cb.cb_type == POOL_SCAN_NONE ||
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-w cannot be used with -p or -s\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
error = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE,
scrub_callback, &cb);
if (wait && !error) {
zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
error = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE,
wait_callback, &act);
}
return (error);
}
/*
* zpool resilver <pool> ...
*
* Restarts any in-progress resilver
*/
int
zpool_do_resilver(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
cb.cb_type = POOL_SCAN_RESILVER;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE,
scrub_callback, &cb));
}
/*
* zpool trim [-dw] [-r <rate>] [-c | -s] <pool> [<device> ...]
*
* -c Cancel. Ends any in-progress trim.
* -d Secure trim. Requires kernel and device support.
* -r <rate> Sets the TRIM rate in bytes (per second). Supports
* adding a multiplier suffix such as 'k' or 'm'.
* -s Suspend. TRIM can then be restarted with no flags.
* -w Wait. Blocks until trimming has completed.
*/
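/*
* Illustrative invocations ('tank' and 'sda' are placeholder names):
*
*   zpool trim tank                 # trim every leaf vdev in the pool
*   zpool trim -r 100m tank sda     # limit rate to about 100M bytes/sec
*   zpool trim -w --secure tank     # secure trim, block until complete
*/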
int
zpool_do_trim(int argc, char **argv)
{
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"secure", no_argument, NULL, 'd'},
{"rate", required_argument, NULL, 'r'},
{"suspend", no_argument, NULL, 's'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_trim_func_t cmd_type = POOL_TRIM_START;
uint64_t rate = 0;
boolean_t secure = B_FALSE;
boolean_t wait = B_FALSE;
int c;
while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
!= -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_CANCEL;
break;
case 'd':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-d cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
secure = B_TRUE;
break;
case 'r':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-r cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
if (zfs_nicestrtonum(NULL, optarg, &rate) == -1) {
(void) fprintf(stderr,
gettext("invalid value for rate\n"));
usage(B_FALSE);
}
break;
case 's':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_SUSPEND;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_TRIM_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
"-s\n"));
usage(B_FALSE);
}
char *poolname = argv[0];
zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
trimflags_t trim_flags = {
.secure = secure,
.rate = rate,
.wait = wait,
};
nvlist_t *vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
trim_flags.fullpool = B_TRUE;
} else {
trim_flags.fullpool = B_FALSE;
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
fnvlist_free(vdevs);
zpool_close(zhp);
return (error);
}
/*
* Converts a total number of seconds to a human-readable string broken
* down into days/hours/minutes/seconds.
*/
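/*
* For example, given the arithmetic above, 3725 seconds formats as
* "01:02:05" and 90061 seconds as "1 days 01:01:01".
*/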
static void
secs_to_dhms(uint64_t total, char *buf)
{
uint64_t days = total / 60 / 60 / 24;
uint64_t hours = (total / 60 / 60) % 24;
uint64_t mins = (total / 60) % 60;
uint64_t secs = (total % 60);
if (days > 0) {
(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
(u_longlong_t)days, (u_longlong_t)hours,
(u_longlong_t)mins, (u_longlong_t)secs);
} else {
(void) sprintf(buf, "%02llu:%02llu:%02llu",
(u_longlong_t)hours, (u_longlong_t)mins,
(u_longlong_t)secs);
}
}
/*
* Print out detailed scrub status.
*/
static void
print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
{
time_t start, end, pause;
uint64_t pass_scanned, scanned, pass_issued, issued, total;
uint64_t elapsed, scan_rate, issue_rate;
double fraction_done;
char processed_buf[7], scanned_buf[7], issued_buf[7], total_buf[7];
char srate_buf[7], irate_buf[7], time_buf[32];
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
/* If there's never been a scan, there's not much to say. */
if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
ps->pss_func >= POOL_SCAN_FUNCS) {
(void) printf(gettext("none requested\n"));
return;
}
start = ps->pss_start_time;
end = ps->pss_end_time;
pause = ps->pss_pass_scrub_pause;
zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
assert(ps->pss_func == POOL_SCAN_SCRUB ||
ps->pss_func == POOL_SCAN_RESILVER);
/* Scan is finished or canceled. */
if (ps->pss_state == DSS_FINISHED) {
secs_to_dhms(end - start, time_buf);
if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("scrub repaired %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilvered %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
}
return;
} else if (ps->pss_state == DSS_CANCELED) {
if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("scrub canceled on %s"),
ctime(&end));
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilver canceled on %s"),
ctime(&end));
}
return;
}
assert(ps->pss_state == DSS_SCANNING);
/* Scan is in progress. Resilvers can't be paused. */
if (ps->pss_func == POOL_SCAN_SCRUB) {
if (pause == 0) {
(void) printf(gettext("scrub in progress since %s"),
ctime(&start));
} else {
(void) printf(gettext("scrub paused since %s"),
ctime(&pause));
(void) printf(gettext("\tscrub started on %s"),
ctime(&start));
}
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilver in progress since %s"),
ctime(&start));
}
scanned = ps->pss_examined;
pass_scanned = ps->pss_pass_exam;
issued = ps->pss_issued;
pass_issued = ps->pss_pass_issued;
total = ps->pss_to_examine;
/* we are only done with a block once we have issued the IO for it */
fraction_done = (double)issued / total;
/* elapsed time for this pass, rounding up to 1 if it's 0 */
elapsed = time(NULL) - ps->pss_pass_start;
elapsed -= ps->pss_pass_scrub_spent_paused;
elapsed = (elapsed != 0) ? elapsed : 1;
scan_rate = pass_scanned / elapsed;
issue_rate = pass_issued / elapsed;
uint64_t total_secs_left = (issue_rate != 0 && total >= issued) ?
((total - issued) / issue_rate) : UINT64_MAX;
secs_to_dhms(total_secs_left, time_buf);
/* format all of the numbers we will be reporting */
zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
zfs_nicebytes(total, total_buf, sizeof (total_buf));
zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
/* do not print estimated time if we have a paused scrub */
if (pause == 0) {
(void) printf(gettext("\t%s scanned at %s/s, "
"%s issued at %s/s, %s total\n"),
scanned_buf, srate_buf, issued_buf, irate_buf, total_buf);
} else {
(void) printf(gettext("\t%s scanned, %s issued, %s total\n"),
scanned_buf, issued_buf, total_buf);
}
if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
processed_buf, 100 * fraction_done);
} else if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("\t%s repaired, %.2f%% done"),
processed_buf, 100 * fraction_done);
}
if (pause == 0) {
if (total_secs_left != UINT64_MAX &&
issue_rate >= 10 * 1024 * 1024) {
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
static void
print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, char *vdev_name)
{
if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
return;
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
uint64_t bytes_issued = vrs->vrs_bytes_issued;
uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
uint64_t bytes_est = vrs->vrs_bytes_est;
uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
(vrs->vrs_pass_time_ms + 1)) * 1000;
uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
(vrs->vrs_pass_time_ms + 1)) * 1000;
double scan_pct = MIN((double)bytes_scanned * 100 /
(bytes_est + 1), 100);
/* Format all of the numbers we will be reporting */
char bytes_scanned_buf[7], bytes_issued_buf[7];
char bytes_rebuilt_buf[7], bytes_est_buf[7];
char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
sizeof (bytes_scanned_buf));
zfs_nicebytes(bytes_issued, bytes_issued_buf,
sizeof (bytes_issued_buf));
zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
sizeof (bytes_rebuilt_buf));
zfs_nicebytes(bytes_est, bytes_est_buf, sizeof (bytes_est_buf));
zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
zfs_nicebytes(issue_rate, issue_rate_buf, sizeof (issue_rate_buf));
time_t start = vrs->vrs_start_time;
time_t end = vrs->vrs_end_time;
/* Rebuild is finished or canceled. */
if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
(void) printf(gettext("resilvered (%s) %s in %s "
"with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
(void) printf(gettext("resilver (%s) canceled on %s"),
vdev_name, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
(void) printf(gettext("resilver (%s) in progress since %s"),
vdev_name, ctime(&start));
}
assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
secs_to_dhms(MAX((int64_t)bytes_est - (int64_t)bytes_scanned, 0) /
MAX(scan_rate, 1), time_buf);
(void) printf(gettext("\t%s scanned at %s/s, %s issued %s/s, "
"%s total\n"), bytes_scanned_buf, scan_rate_buf,
bytes_issued_buf, issue_rate_buf, bytes_est_buf);
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
bytes_rebuilt_buf, scan_pct);
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
if (scan_rate >= 10 * 1024 * 1024) {
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
/*
* Print rebuild status for top-level vdevs.
*/
static void
print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
char *name = zpool_vdev_name(g_zfs, zhp,
child[c], VDEV_NAME_TYPE_ID);
print_rebuild_status_impl(vrs, name);
free(name);
}
}
}
/*
* As we don't scrub checkpointed blocks, we want to warn the user that we
* skipped scanning some blocks if a checkpoint exists or existed at any
* time during the scan. If a sequential instead of healing reconstruction
* was performed then the blocks were reconstructed. However, their checksums
* have not been verified so we still print the warning.
*/
static void
print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
{
if (ps == NULL || pcs == NULL)
return;
if (pcs->pcs_state == CS_NONE ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
return;
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
if (ps->pss_state == DSS_NONE)
return;
if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
ps->pss_end_time < pcs->pcs_start_time)
return;
if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
(void) printf(gettext(" scan warning: skipped blocks "
"that are only referenced by the checkpoint.\n"));
} else {
assert(ps->pss_state == DSS_SCANNING);
(void) printf(gettext(" scan warning: skipping blocks "
"that are only referenced by the checkpoint.\n"));
}
}
/*
* Returns B_TRUE if there is an active rebuild in progress. Otherwise,
* B_FALSE is returned and 'rebuild_end_time' is set to the end time for
* the last completed (or canceled) rebuild.
*/
static boolean_t
check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
{
nvlist_t **child;
uint_t children;
boolean_t rebuilding = B_FALSE;
uint64_t end_time = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
if (vrs->vrs_end_time > end_time)
end_time = vrs->vrs_end_time;
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
rebuilding = B_TRUE;
end_time = 0;
break;
}
}
}
if (rebuild_end_time != NULL)
*rebuild_end_time = end_time;
return (rebuilding);
}
/*
* Print the scan status.
*/
static void
print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
uint64_t rebuild_end_time = 0, resilver_end_time = 0;
boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
boolean_t active_resilver = B_FALSE;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *ps = NULL;
uint_t c;
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c) == 0) {
if (ps->pss_func == POOL_SCAN_RESILVER) {
resilver_end_time = ps->pss_end_time;
active_resilver = (ps->pss_state == DSS_SCANNING);
}
have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
}
boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
/* Always print the scrub status when available. */
if (have_scrub)
print_scan_scrub_resilver_status(ps);
/*
* When there is an active resilver or rebuild print its status.
* Otherwise print the status of the last resilver or rebuild.
*/
if (active_resilver || (!active_rebuild && have_resilver &&
resilver_end_time && resilver_end_time > rebuild_end_time)) {
print_scan_scrub_resilver_status(ps);
} else if (active_rebuild || (!active_resilver && have_rebuild &&
rebuild_end_time && rebuild_end_time > resilver_end_time)) {
print_rebuild_status(zhp, nvroot);
}
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_scan_warning(ps, pcs);
}
/*
* Print out detailed removal status.
*/
static void
print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
{
char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
time_t start, end;
nvlist_t *config, *nvroot;
nvlist_t **child;
uint_t children;
char *vdev_name;
if (prs == NULL || prs->prs_state == DSS_NONE)
return;
/*
* Determine name of vdev.
*/
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0);
assert(prs->prs_removing_vdev < children);
vdev_name = zpool_vdev_name(g_zfs, zhp,
child[prs->prs_removing_vdev], B_TRUE);
printf_color(ANSI_BOLD, gettext("remove: "));
start = prs->prs_start_time;
end = prs->prs_end_time;
zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
/*
* Removal is finished or canceled.
*/
if (prs->prs_state == DSS_FINISHED) {
uint64_t minutes_taken = (end - start) / 60;
(void) printf(gettext("Removal of vdev %llu copied %s "
"in %lluh%um, completed on %s"),
(longlong_t)prs->prs_removing_vdev,
copied_buf,
(u_longlong_t)(minutes_taken / 60),
(uint_t)(minutes_taken % 60),
ctime((time_t *)&end));
} else if (prs->prs_state == DSS_CANCELED) {
(void) printf(gettext("Removal of %s canceled on %s"),
vdev_name, ctime(&end));
} else {
uint64_t copied, total, elapsed, mins_left, hours_left;
double fraction_done;
uint_t rate;
assert(prs->prs_state == DSS_SCANNING);
/*
* Removal is in progress.
*/
(void) printf(gettext(
"Evacuation of %s in progress since %s"),
vdev_name, ctime(&start));
copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
total = prs->prs_to_copy;
fraction_done = (double)copied / total;
/* elapsed time for this pass */
elapsed = time(NULL) - prs->prs_start_time;
elapsed = elapsed > 0 ? elapsed : 1;
rate = copied / elapsed;
rate = rate > 0 ? rate : 1;
mins_left = ((total - copied) / rate) / 60;
hours_left = mins_left / 60;
zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
zfs_nicenum(total, total_buf, sizeof (total_buf));
zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
/*
* do not print estimated time if hours_left is more than
* 30 days
*/
(void) printf(gettext(
"\t%s copied out of %s at %s/s, %.2f%% done"),
examined_buf, total_buf, rate_buf, 100 * fraction_done);
if (hours_left < (30 * 24)) {
(void) printf(gettext(", %lluh%um to go\n"),
(u_longlong_t)hours_left, (uint_t)(mins_left % 60));
} else {
(void) printf(gettext(
", (copy is slow, no estimated time)\n"));
}
}
free(vdev_name);
if (prs->prs_mapping_memory > 0) {
char mem_buf[7];
zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
(void) printf(gettext(
"\t%s memory used for removed device mappings\n"),
mem_buf);
}
}
static void
print_checkpoint_status(pool_checkpoint_stat_t *pcs)
{
time_t start;
char space_buf[7];
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return;
(void) printf(gettext("checkpoint: "));
start = pcs->pcs_start_time;
zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
char *date = ctime(&start);
/*
* ctime() adds a newline at the end of the generated
* string, thus the weird format specifier and the
* strlen() call used to chop it off from the output.
*/
(void) printf(gettext("created %.*s, consumes %s\n"),
(int)(strlen(date) - 1), date, space_buf);
return;
}
assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
(void) printf(gettext("discarding, %s remaining.\n"),
space_buf);
}
static void
print_error_log(zpool_handle_t *zhp)
{
nvlist_t *nverrlist = NULL;
nvpair_t *elem;
char *pathname;
size_t len = MAXPATHLEN * 2;
if (zpool_get_errlog(zhp, &nverrlist) != 0)
return;
(void) printf("errors: Permanent errors have been "
"detected in the following files:\n\n");
pathname = safe_malloc(len);
elem = NULL;
while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
nvlist_t *nv;
uint64_t dsobj, obj;
verify(nvpair_value_nvlist(elem, &nv) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
&dsobj) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
&obj) == 0);
zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
(void) printf("%7s %s\n", "", pathname);
}
free(pathname);
nvlist_free(nverrlist);
}
static void
print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
uint_t nspares)
{
uint_t i;
char *name;
if (nspares == 0)
return;
(void) printf(gettext("\tspares\n"));
for (i = 0; i < nspares; i++) {
name = zpool_vdev_name(g_zfs, zhp, spares[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
free(name);
}
}
static void
print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
uint_t nl2cache)
{
uint_t i;
char *name;
if (nl2cache == 0)
return;
(void) printf(gettext("\tcache\n"));
for (i = 0; i < nl2cache; i++) {
name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, l2cache[i], 2,
B_FALSE, NULL);
free(name);
}
}
static void
print_dedup_stats(nvlist_t *config)
{
ddt_histogram_t *ddh;
ddt_stat_t *dds;
ddt_object_t *ddo;
uint_t c;
char dspace[6], mspace[6];
/*
* If the pool was faulted then we may not have been able to
* obtain the config. Otherwise, if we have anything in the dedup
* table continue processing the stats.
*/
if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
(uint64_t **)&ddo, &c) != 0)
return;
(void) printf("\n");
(void) printf(gettext(" dedup: "));
if (ddo->ddo_count == 0) {
(void) printf(gettext("no DDT entries\n"));
return;
}
zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
(void) printf("DDT entries %llu, size %s on disk, %s in core\n",
(u_longlong_t)ddo->ddo_count,
dspace,
mspace);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
(uint64_t **)&dds, &c) == 0);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
(uint64_t **)&ddh, &c) == 0);
zpool_dump_ddt(dds, ddh);
}
/*
* Display a summary of pool status, such as:
*
* pool: tank
* status: DEGRADED
* reason: One or more devices ...
* see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
* config:
* mirror DEGRADED
* c1t0d0 OK
* c2t0d0 UNAVAIL
*
* When given the '-v' option, we print out the complete config and the
* complete error logs.
*/
static int
status_callback(zpool_handle_t *zhp, void *data)
{
status_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
char *msgid;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t c;
vdev_stat_t *vs;
config = zpool_get_config(zhp, NULL);
reason = zpool_get_status(zhp, &msgid, &errata);
cbp->cb_count++;
/*
* If we were given 'zpool status -x', only report those pools with
* problems.
*/
if (cbp->cb_explain &&
(reason == ZPOOL_STATUS_OK ||
reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED ||
reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
if (!cbp->cb_allpools) {
(void) printf(gettext("pool '%s' is healthy\n"),
zpool_get_name(zhp));
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
}
return (0);
}
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
else
(void) printf("\n");
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
health = zpool_get_state_str(zhp);
printf(" ");
printf_color(ANSI_BOLD, gettext("pool:"));
printf(" %s\n", zpool_get_name(zhp));
printf(" ");
printf_color(ANSI_BOLD, gettext("state: "));
printf_color(health_str_to_color(health), "%s", health);
printf("\n");
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be opened. Sufficient replicas exist for\n\tthe pool "
"to continue functioning in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be opened. There are insufficient\n\treplicas for the"
" pool to continue functioning.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be used because the label is missing or\n\tinvalid. "
"Sufficient replicas exist for the pool to continue\n\t"
"functioning in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Replace the device using "
"'zpool replace'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be used because the label is missing \n\tor invalid. "
"There are insufficient replicas for the pool to "
"continue\n\tfunctioning.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_FAILING_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"experienced an unrecoverable error. An\n\tattempt was "
"made to correct the error. Applications are "
"unaffected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Determine if the "
"device needs to be replaced, and clear the errors\n\tusing"
" 'zpool clear' or replace the device with 'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"been taken offline by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Online the device "
"using 'zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_REMOVED_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"been removed by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Online the device "
"using zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices is "
"currently being resilvered. The pool will\n\tcontinue "
"to function, possibly in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
"complete.\n"));
break;
case ZPOOL_STATUS_REBUILD_SCRUB:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices have "
"been sequentially resilvered, scrubbing\n\tthe pool "
"is recommended.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
"verify all data checksums.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"experienced an error resulting in data\n\tcorruption. "
"Applications may be affected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Restore the file in question"
" if possible. Otherwise restore the\n\tentire pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted and the pool cannot be opened.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk format. The pool can\n\tstill be used, "
"but some features are unavailable.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
"'zpool upgrade'. Once this is done, the\n\tpool will no "
"longer be accessible on software that does not support\n\t"
"feature flags.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
"to a newer, incompatible on-disk version.\n\tThe pool "
"cannot be accessed on this system.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Access the pool from a "
"system running more recent software, or\n\trestore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Some supported and "
"requested features are not enabled on the pool.\n\t"
"The pool can still be used, but some features are "
"unavailable.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Enable all features using "
"'zpool upgrade'. Once this is done,\n\tthe pool may no "
"longer be accessible by software that does not support\n\t"
"the features. See zpool-features(7) for details.\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("This pool has a "
"compatibility list specified, but it could not be\n\t"
"read/parsed at this time. The pool can still be used, "
"but this\n\tshould be investigated.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Check the value of the "
"'compatibility' property against the\n\t"
"appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n\t"
"requested by the 'compatibility' property.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Consider setting "
"'compatibility' to an appropriate value, or\n\t"
"adding needed features to the relevant file in\n\t"
ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
"on this system because it uses the\n\tfollowing feature(s)"
" not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Access the pool from a "
"system that supports the required feature(s),\n\tor "
"restore the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
"in read-write mode. Import the pool with\n"
"\t\"-o readonly=on\", access the pool from a system that "
"supports the\n\trequired feature(s), or restore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to persistent errors.\n\tSufficient "
"replicas exist for the pool to continue functioning "
"in a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
"or use 'zpool clear' to mark the device\n\trepaired.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to persistent errors. There are "
"insufficient replicas for the pool to\n\tcontinue "
"functioning.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
"pool from a backup source. Manually marking the device\n"
"\trepaired using 'zpool clear' may allow some data "
"to be recovered.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_MMP:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is suspended "
"because multihost writes failed or were delayed;\n\t"
"another system could import the pool undetected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
" are connected, then reboot your system and\n\timport the "
"pool.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_WAIT:
case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to IO failures.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Make sure the affected "
"devices are connected, then run 'zpool clear'.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("An intent log record "
"could not be read.\n"
"\tWaiting for administrator intervention to fix the "
"faulted pool.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Either restore the affected "
"device(s) and run 'zpool online',\n"
"\tor ignore the intent log records by running "
"'zpool clear'.\n"));
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
(void) printf(gettext("status: One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
(void) printf(gettext("action: Replace affected devices with "
"devices that support the\n\tconfigured block size, or "
"migrate data to a properly configured\n\tpool.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
" and system hostid on imported pool.\n\tThis pool was "
"previously imported into a system with a different "
"hostid,\n\tand then was verbatim imported into this "
"system.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Export this pool on all "
"systems on which it is imported.\n"
"\tThen import it to correct the mismatch.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the issue"
" run 'zpool scrub'.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted datasets "
"contain an on-disk incompatibility\n\twhich "
"needs to be corrected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the issue"
" backup existing encrypted datasets to new\n\t"
"encrypted datasets and destroy the old ones. "
"'zfs mount -o ro' can\n\tbe used to temporarily "
"mount existing encrypted datasets readonly.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted snapshots "
"and bookmarks contain an on-disk\n\tincompat"
"ibility. This may cause on-disk corruption if "
"they are used\n\twith 'zfs recv'.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the"
"issue, enable the bookmark_v2 feature. No "
"additional\n\taction is needed if there are no "
"encrypted snapshots or bookmarks.\n\tIf preserving"
"the encrypted snapshots and bookmarks is required,"
" use\n\ta non-raw send to backup and restore them."
" Alternately, they may be\n\tremoved to resolve "
"the incompatibility.\n"));
break;
default:
/*
* All errata which allow the pool to be imported
* must contain an action message.
*/
assert(0);
}
break;
default:
/*
* The remaining errors can't actually be generated, yet.
*/
assert(reason == ZPOOL_STATUS_OK);
}
if (msgid != NULL) {
printf(" ");
printf_color(ANSI_BOLD, gettext("see:"));
printf(gettext(
" https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
if (config != NULL) {
uint64_t nerr;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
pool_checkpoint_stat_t *pcs = NULL;
pool_removal_stat_t *prs = NULL;
print_scan_status(zhp, nvroot);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
print_removal_status(zhp, prs);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_status(pcs);
cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cbp->cb_namewidth < 10)
cbp->cb_namewidth = 10;
color_start(ANSI_BOLD);
(void) printf(gettext("config:\n\n"));
(void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
"CKSUM");
color_end();
if (cbp->cb_print_slow_ios) {
printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
}
if (cbp->vcdl != NULL)
print_cmd_columns(cbp->vcdl, 0);
printf("\n");
print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
B_FALSE, NULL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0)
print_l2cache(zhp, cbp, l2cache, nl2cache);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0)
print_spares(zhp, cbp, spares, nspares);
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
&nerr) == 0) {
nvlist_t *nverrlist = NULL;
/*
* If the approximate error count is small, get a
* precise count by fetching the entire log and
* uniquifying the results.
*/
if (nerr > 0 && nerr < 100 && !cbp->cb_verbose &&
zpool_get_errlog(zhp, &nverrlist) == 0) {
nvpair_t *elem;
elem = NULL;
nerr = 0;
while ((elem = nvlist_next_nvpair(nverrlist,
elem)) != NULL) {
nerr++;
}
}
nvlist_free(nverrlist);
(void) printf("\n");
if (nerr == 0)
(void) printf(gettext("errors: No known data "
"errors\n"));
else if (!cbp->cb_verbose)
(void) printf(gettext("errors: %llu data "
"errors, use '-v' for a list\n"),
(u_longlong_t)nerr);
else
print_error_log(zhp);
}
if (cbp->cb_dedup_stats)
print_dedup_stats(config);
} else {
(void) printf(gettext("config: The configuration cannot be "
"determined.\n"));
}
return (0);
}
/*
* zpool status [-c [script1,script2,...]] [-igLpPstvx] [-T d|u] [pool] ...
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -i Display vdev initialization status.
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -s Display slow IOs column.
* -v Display complete error logs
* -x Display only pools with potential problems
* -D Display dedup status (undocumented)
* -t Display vdev TRIM status.
* -T Display a timestamp in date(1) or Unix format
*
* Describes the health status of all pools or some subset.
*/
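/*
* Illustrative invocations ('tank' is a placeholder pool name):
*
*   zpool status -x                 # only pools with potential problems
*   zpool status -v tank            # full config plus complete error logs
*   zpool status tank 5             # redisplay every 5 seconds
*/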
int
zpool_do_status(int argc, char **argv)
{
int c;
int ret;
float interval = 0;
unsigned long count = 0;
status_cbdata_t cb = { 0 };
char *cmd = NULL;
/* check options */
while ((c = getopt(argc, argv, "c:igLpPsvxDtT:")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
break;
case 'i':
cb.cb_print_vdev_init = B_TRUE;
break;
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 's':
cb.cb_print_slow_ios = B_TRUE;
break;
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'x':
cb.cb_explain = B_TRUE;
break;
case 'D':
cb.cb_dedup_stats = B_TRUE;
break;
case 't':
cb.cb_print_vdev_trim = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("status");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (argc == 0)
cb.cb_allpools = B_TRUE;
cb.cb_first = B_TRUE;
cb.cb_print_status = B_TRUE;
for (;;) {
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL)
cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
NULL, NULL, 0, 0);
ret = for_each_pool(argc, argv, B_TRUE, NULL, cb.cb_literal,
status_callback, &cb);
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
if (argc == 0 && cb.cb_count == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
(void) printf(gettext("all pools are healthy\n"));
if (ret != 0)
return (ret);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
return (0);
}
typedef struct upgrade_cbdata {
int cb_first;
int cb_argc;
uint64_t cb_version;
char **cb_argv;
} upgrade_cbdata_t;
static int
check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
{
int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int *count = (int *)unsupp_fs;
if (zfs_version > ZPL_VERSION) {
(void) printf(gettext("%s (v%d) is not supported by this "
"implementation of ZFS.\n"),
zfs_get_name(zhp), zfs_version);
(*count)++;
}
zfs_iter_filesystems(zhp, check_unsupp_fs, unsupp_fs);
zfs_close(zhp);
return (0);
}
static int
upgrade_version(zpool_handle_t *zhp, uint64_t version)
{
int ret;
nvlist_t *config;
uint64_t oldversion;
int unsupp_fs = 0;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&oldversion) == 0);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
assert(SPA_VERSION_IS_SUPPORTED(oldversion));
assert(oldversion < version);
ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
if (ret != 0)
return (ret);
if (unsupp_fs) {
(void) fprintf(stderr, gettext("Upgrade not performed due "
"to %d unsupported filesystems (max v%d).\n"),
unsupp_fs, (int)ZPL_VERSION);
return (1);
}
if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
(void) fprintf(stderr, gettext("Upgrade not performed because "
"'compatibility' property set to '"
ZPOOL_COMPAT_LEGACY "'.\n"));
return (1);
}
ret = zpool_upgrade(zhp, version);
if (ret != 0)
return (ret);
if (version >= SPA_VERSION_FEATURES) {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to feature flags.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion);
} else {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to version %llu.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion,
(u_longlong_t)version);
}
return (0);
}
static int
upgrade_enable_all(zpool_handle_t *zhp, int *countp)
{
int i, ret, count;
boolean_t firstff = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
count = 0;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fname = spa_feature_table[i].fi_uname;
const char *fguid = spa_feature_table[i].fi_guid;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
char *propname;
verify(-1 != asprintf(&propname, "feature@%s", fname));
ret = zpool_set_prop(zhp, propname,
ZFS_FEATURE_ENABLED);
if (ret != 0) {
free(propname);
return (ret);
}
count++;
if (firstff) {
(void) printf(gettext("Enabled the "
"following features on '%s':\n"),
zpool_get_name(zhp));
firstff = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
free(propname);
}
}
if (countp != NULL)
*countp = count;
return (0);
}
static int
upgrade_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
boolean_t modified_pool = B_FALSE;
int ret;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < cbp->cb_version) {
cbp->cb_first = B_FALSE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
modified_pool = B_TRUE;
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count > 0) {
cbp->cb_first = B_FALSE;
modified_pool = B_TRUE;
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
static int
upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < SPA_VERSION_FEATURES) {
if (cbp->cb_first) {
(void) printf(gettext("The following pools are "
"formatted with legacy version numbers and can\n"
"be upgraded to use feature flags. After "
"being upgraded, these pools\nwill no "
"longer be accessible by software that does not "
"support feature\nflags.\n\n"
"Note that setting a pool's 'compatibility' "
"feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
"inhibit upgrades.\n\n"));
(void) printf(gettext("VER POOL\n"));
(void) printf(gettext("--- ------------\n"));
cbp->cb_first = B_FALSE;
}
(void) printf("%2llu %s\n", (u_longlong_t)version,
zpool_get_name(zhp));
}
return (0);
}
static int
upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
if (version >= SPA_VERSION_FEATURES) {
int i;
boolean_t poolfirst = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
const char *fname = spa_feature_table[i].fi_uname;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid)) {
if (cbp->cb_first) {
(void) printf(gettext("\nSome "
"supported features are not "
"enabled on the following pools. "
"Once a\nfeature is enabled the "
"pool may become incompatible with "
"software\nthat does not support "
"the feature. See "
"zpool-features(7) for "
"details.\n\n"
"Note that the pool "
"'compatibility' feature can be "
"used to inhibit\nfeature "
"upgrades.\n\n"));
(void) printf(gettext("POOL "
"FEATURE\n"));
(void) printf(gettext("------"
"---------\n"));
cbp->cb_first = B_FALSE;
}
if (poolfirst) {
(void) printf(gettext("%s\n"),
zpool_get_name(zhp));
poolfirst = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
}
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
}
return (0);
}
/* ARGSUSED */
static int
upgrade_one(zpool_handle_t *zhp, void *data)
{
boolean_t modified_pool = B_FALSE;
upgrade_cbdata_t *cbp = data;
uint64_t cur_version;
int ret;
if (strcmp("log", zpool_get_name(zhp)) == 0) {
(void) fprintf(stderr, gettext("'log' is now a reserved word\n"
"Pool 'log' must be renamed using export and import"
" to upgrade.\n"));
return (1);
}
cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (cur_version > cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using more current version '%llu'.\n\n"),
zpool_get_name(zhp), (u_longlong_t)cur_version);
return (0);
}
if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using version %llu.\n\n"), zpool_get_name(zhp),
(u_longlong_t)cbp->cb_version);
return (0);
}
if (cur_version != cbp->cb_version) {
modified_pool = B_TRUE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count = 0;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count != 0) {
modified_pool = B_TRUE;
} else if (cur_version == SPA_VERSION) {
(void) printf(gettext("Pool '%s' already has all "
"supported and requested features enabled.\n"),
zpool_get_name(zhp));
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
/*
* zpool upgrade
* zpool upgrade -v
* zpool upgrade [-V version] <-a | pool ...>
*
* With no arguments, display downrev'd ZFS pools available for upgrade.
* Individual pools can be upgraded by specifying the pool, and '-a' will
* upgrade all pools.
*/
int
zpool_do_upgrade(int argc, char **argv)
{
int c;
upgrade_cbdata_t cb = { 0 };
int ret = 0;
boolean_t showversions = B_FALSE;
boolean_t upgradeall = B_FALSE;
char *end;
/* check options */
while ((c = getopt(argc, argv, ":avV:")) != -1) {
switch (c) {
case 'a':
upgradeall = B_TRUE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
cb.cb_version = strtoll(optarg, &end, 10);
if (*end != '\0' ||
!SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
(void) fprintf(stderr,
gettext("invalid version '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.cb_argc = argc;
cb.cb_argv = argv;
argc -= optind;
argv += optind;
if (cb.cb_version == 0) {
cb.cb_version = SPA_VERSION;
} else if (!upgradeall && argc == 0) {
(void) fprintf(stderr, gettext("-V option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
if (showversions) {
if (upgradeall || argc != 0) {
(void) fprintf(stderr, gettext("-v option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
} else if (upgradeall) {
if (argc != 0) {
(void) fprintf(stderr, gettext("-a option should not "
"be used along with a pool name\n"));
usage(B_FALSE);
}
}
(void) printf(gettext("This system supports ZFS pool feature "
"flags.\n\n"));
if (showversions) {
int i;
(void) printf(gettext("The following features are "
"supported:\n\n"));
(void) printf(gettext("FEAT DESCRIPTION\n"));
(void) printf("----------------------------------------------"
"---------------\n");
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t *fi = &spa_feature_table[i];
if (!fi->fi_zfs_mod_supported)
continue;
const char *ro =
(fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
" (read-only compatible)" : "";
(void) printf("%-37s%s\n", fi->fi_uname, ro);
(void) printf(" %s\n", fi->fi_desc);
}
(void) printf("\n");
(void) printf(gettext("The following legacy versions are also "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS version\n"));
(void) printf(gettext(" 2 Ditto blocks "
"(replicated metadata)\n"));
(void) printf(gettext(" 3 Hot spares and double parity "
"RAID-Z\n"));
(void) printf(gettext(" 4 zpool history\n"));
(void) printf(gettext(" 5 Compression using the gzip "
"algorithm\n"));
(void) printf(gettext(" 6 bootfs pool property\n"));
(void) printf(gettext(" 7 Separate intent log devices\n"));
(void) printf(gettext(" 8 Delegated administration\n"));
(void) printf(gettext(" 9 refquota and refreservation "
"properties\n"));
(void) printf(gettext(" 10 Cache devices\n"));
(void) printf(gettext(" 11 Improved scrub performance\n"));
(void) printf(gettext(" 12 Snapshot properties\n"));
(void) printf(gettext(" 13 snapused property\n"));
(void) printf(gettext(" 14 passthrough-x aclinherit\n"));
(void) printf(gettext(" 15 user/group space accounting\n"));
(void) printf(gettext(" 16 stmf property support\n"));
(void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
(void) printf(gettext(" 18 Snapshot user holds\n"));
(void) printf(gettext(" 19 Log device removal\n"));
(void) printf(gettext(" 20 Compression using zle "
"(zero-length encoding)\n"));
(void) printf(gettext(" 21 Deduplication\n"));
(void) printf(gettext(" 22 Received properties\n"));
(void) printf(gettext(" 23 Slim ZIL\n"));
(void) printf(gettext(" 24 System attributes\n"));
(void) printf(gettext(" 25 Improved scrub stats\n"));
(void) printf(gettext(" 26 Improved snapshot deletion "
"performance\n"));
(void) printf(gettext(" 27 Improved snapshot creation "
"performance\n"));
(void) printf(gettext(" 28 Multiple vdev replacements\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
} else if (argc == 0 && upgradeall) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_cb, &cb);
if (ret == 0 && cb.cb_first) {
if (cb.cb_version == SPA_VERSION) {
(void) printf(gettext("All pools are already "
"formatted using feature flags.\n\n"));
(void) printf(gettext("Every feature flags "
"pool already has all supported and "
"requested features enabled.\n"));
} else {
(void) printf(gettext("All pools are already "
"formatted with version %llu or higher.\n"),
(u_longlong_t)cb.cb_version);
}
}
} else if (argc == 0) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("All pools are formatted "
"using feature flags.\n\n"));
} else {
(void) printf(gettext("\nUse 'zpool upgrade -v' "
"for a list of available legacy versions.\n"));
}
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("Every feature flags pool has "
"all supported and requested features enabled.\n"));
} else {
(void) printf(gettext("\n"));
}
} else {
ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE,
upgrade_one, &cb);
}
return (ret);
}
typedef struct hist_cbdata {
boolean_t first;
boolean_t longfmt;
boolean_t internal;
} hist_cbdata_t;
static void
print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
{
nvlist_t **records;
uint_t numrecords;
int i;
verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
&records, &numrecords) == 0);
for (i = 0; i < numrecords; i++) {
nvlist_t *rec = records[i];
char tbuf[64] = "";
if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(records[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
}
if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
ZPOOL_HIST_ELAPSED_NS);
(void) snprintf(tbuf + strlen(tbuf),
sizeof (tbuf) - strlen(tbuf),
" (%lldms)", (long long)elapsed_ns / 1000 / 1000);
}
if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
(void) printf("%s %s", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
int ievent =
fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
if (!cb->internal)
continue;
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
(void) printf("%s unrecognized record:\n",
tbuf);
dump_nvlist(rec, 4);
continue;
}
(void) printf("%s [internal %s txg:%lld] %s", tbuf,
zfs_history_event_names[ievent],
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
if (!cb->internal)
continue;
(void) printf("%s [txg:%lld] %s", tbuf,
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(rec,
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(rec,
ZPOOL_HIST_DSID));
}
(void) printf(" %s", fnvlist_lookup_string(rec,
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
if (!cb->internal)
continue;
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
(void) printf(" output nvlist omitted; "
"original size: %lldKB\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_OUTPUT_SIZE) / 1024);
}
if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_ERRNO));
}
} else {
if (!cb->internal)
continue;
(void) printf("%s unrecognized record:\n", tbuf);
dump_nvlist(rec, 4);
}
if (!cb->longfmt) {
(void) printf("\n");
continue;
}
(void) printf(" [");
if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
struct passwd *pwd = getpwuid(who);
(void) printf("user %d ", (int)who);
if (pwd != NULL)
(void) printf("(%s) ", pwd->pw_name);
}
if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
(void) printf("on %s",
fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
}
if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
(void) printf(":%s",
fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
}
(void) printf("]");
(void) printf("\n");
}
}
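/*
 * The tbuf built above uses strftime(3) with "%F.%T", which yields
 * timestamps of the form "YYYY-MM-DD.HH:MM:SS". A standalone sketch of the
 * same formatting, using the current time for illustration:
 */
static void
history_time_example(void)
{
	time_t tsec = time(NULL);
	struct tm t;
	char tbuf[64];

	(void) localtime_r(&tsec, &t);
	(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
	(void) printf("%s\n", tbuf);	/* e.g. "2021-06-30.21:49:08" */
}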
/*
* Print out the command history for a specific pool.
*/
static int
get_history_one(zpool_handle_t *zhp, void *data)
{
nvlist_t *nvhis;
int ret;
hist_cbdata_t *cb = (hist_cbdata_t *)data;
uint64_t off = 0;
boolean_t eof = B_FALSE;
cb->first = B_FALSE;
(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
while (!eof) {
if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
return (ret);
print_history_records(nvhis, cb);
nvlist_free(nvhis);
}
(void) printf("\n");
return (ret);
}
/*
* zpool history <pool>
*
* Displays the history of commands that modified pools.
*/
int
zpool_do_history(int argc, char **argv)
{
hist_cbdata_t cbdata = { 0 };
int ret;
int c;
cbdata.first = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "li")) != -1) {
switch (c) {
case 'l':
cbdata.longfmt = B_TRUE;
break;
case 'i':
cbdata.internal = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, get_history_one,
&cbdata);
if (argc == 0 && cbdata.first == B_TRUE) {
(void) fprintf(stderr, gettext("no pools available\n"));
return (0);
}
return (ret);
}
typedef struct ev_opts {
int verbose;
int scripted;
int follow;
int clear;
char poolname[ZFS_MAX_DATASET_NAME_LEN];
} ev_opts_t;
static void
zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
{
char ctime_str[26], str[32], *ptr;
int64_t *tv;
uint_t n;
verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
memset(str, ' ', 32);
(void) ctime_r((const time_t *)&tv[0], ctime_str);
(void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
(void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
(void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
if (opts->scripted)
(void) printf(gettext("%s\t"), str);
else
(void) printf(gettext("%s "), str);
verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
(void) printf(gettext("%s\n"), ptr);
}
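/*
 * The constant offsets used in the memcpy() calls above come from the fixed
 * 26-byte layout that ctime_r(3) always produces, e.g.
 * "Wed Jun 30 21:49:08 1993\n": the month/day starts at offset 4, the time
 * at offset 11, and the year at offset 20. A standalone sketch:
 */
static void
ctime_layout_example(void)
{
	char ctime_str[26];
	time_t now = time(NULL);

	(void) ctime_r(&now, ctime_str);
	(void) printf("month/day '%.6s'  year '%.4s'  time '%.8s'\n",
	    ctime_str + 4, ctime_str + 20, ctime_str + 11);
}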
static void
zpool_do_events_nvprint(nvlist_t *nvl, int depth)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(nvl, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
data_type_t type = nvpair_type(nvp);
const char *name = nvpair_name(nvp);
boolean_t b;
uint8_t i8;
uint16_t i16;
uint32_t i32;
uint64_t i64;
char *str;
nvlist_t *cnv;
printf(gettext("%*s%s = "), depth, "", name);
switch (type) {
case DATA_TYPE_BOOLEAN:
printf(gettext("%s"), "1");
break;
case DATA_TYPE_BOOLEAN_VALUE:
(void) nvpair_value_boolean_value(nvp, &b);
printf(gettext("%s"), b ? "1" : "0");
break;
case DATA_TYPE_BYTE:
(void) nvpair_value_byte(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT8:
(void) nvpair_value_int8(nvp, (void *)&i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_UINT8:
(void) nvpair_value_uint8(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT16:
(void) nvpair_value_int16(nvp, (void *)&i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_UINT16:
(void) nvpair_value_uint16(nvp, &i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_INT32:
(void) nvpair_value_int32(nvp, (void *)&i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_INT64:
(void) nvpair_value_int64(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &i64);
/*
* translate vdev state values to readable
* strings to aid zpool events consumers
*/
if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
printf(gettext("\"%s\" (0x%llx)"),
zpool_state_to_name(i64, VDEV_AUX_NONE),
(u_longlong_t)i64);
} else {
printf(gettext("0x%llx"), (u_longlong_t)i64);
}
break;
case DATA_TYPE_HRTIME:
(void) nvpair_value_hrtime(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &str);
printf(gettext("\"%s\""), str ? str : "<NULL>");
break;
case DATA_TYPE_NVLIST:
printf(gettext("(embedded nvlist)\n"));
(void) nvpair_value_nvlist(nvp, &cnv);
zpool_do_events_nvprint(cnv, depth + 8);
printf(gettext("%*s(end %s)"), depth, "", name);
break;
case DATA_TYPE_NVLIST_ARRAY: {
nvlist_t **val;
uint_t i, nelem;
(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
printf(gettext("(%d embedded nvlists)\n"), nelem);
for (i = 0; i < nelem; i++) {
printf(gettext("%*s%s[%d] = %s\n"),
depth, "", name, i, "(embedded nvlist)");
zpool_do_events_nvprint(val[i], depth + 8);
printf(gettext("%*s(end %s[%i])\n"),
depth, "", name, i);
}
printf(gettext("%*s(end %s)\n"), depth, "", name);
}
break;
case DATA_TYPE_INT8_ARRAY: {
int8_t *val;
uint_t i, nelem;
(void) nvpair_value_int8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT8_ARRAY: {
uint8_t *val;
uint_t i, nelem;
(void) nvpair_value_uint8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT16_ARRAY: {
int16_t *val;
uint_t i, nelem;
(void) nvpair_value_int16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT16_ARRAY: {
uint16_t *val;
uint_t i, nelem;
(void) nvpair_value_uint16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT32_ARRAY: {
int32_t *val;
uint_t i, nelem;
(void) nvpair_value_int32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT32_ARRAY: {
uint32_t *val;
uint_t i, nelem;
(void) nvpair_value_uint32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT64_ARRAY: {
int64_t *val;
uint_t i, nelem;
(void) nvpair_value_int64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_UINT64_ARRAY: {
uint64_t *val;
uint_t i, nelem;
(void) nvpair_value_uint64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_STRING_ARRAY: {
char **str;
uint_t i, nelem;
(void) nvpair_value_string_array(nvp, &str, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("\"%s\" "),
str[i] ? str[i] : "<NULL>");
break;
}
case DATA_TYPE_BOOLEAN_ARRAY:
case DATA_TYPE_BYTE_ARRAY:
case DATA_TYPE_DOUBLE:
case DATA_TYPE_DONTCARE:
case DATA_TYPE_UNKNOWN:
printf(gettext("<unknown>"));
break;
}
printf(gettext("\n"));
}
}
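/*
 * Minimal sketch of the kind of nested nvlist the recursive printer above
 * walks; the pair names and values here are illustrative only, not taken
 * from a real event.
 */
static void
events_nvprint_example(void)
{
	nvlist_t *vdev = fnvlist_alloc();
	nvlist_t *event = fnvlist_alloc();

	fnvlist_add_string(vdev, "vdev_path", "/dev/sda");
	fnvlist_add_uint64(vdev, "vdev_guid", 0x1234ULL);
	fnvlist_add_string(event, "class", "ereport.fs.zfs.checksum");
	fnvlist_add_nvlist(event, "vdev", vdev);	/* embedded nvlist */

	zpool_do_events_nvprint(event, 8);	/* same depth as -v output */

	fnvlist_free(vdev);
	fnvlist_free(event);
}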
static int
zpool_do_events_next(ev_opts_t *opts)
{
nvlist_t *nvl;
int zevent_fd, ret, dropped;
char *pool;
zevent_fd = open(ZFS_DEV, O_RDWR);
VERIFY(zevent_fd >= 0);
if (!opts->scripted)
(void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
while (1) {
ret = zpool_events_next(g_zfs, &nvl, &dropped,
(opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
if (ret || nvl == NULL)
break;
if (dropped > 0)
(void) printf(gettext("dropped %d events\n"), dropped);
if (strlen(opts->poolname) > 0 &&
nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
strcmp(opts->poolname, pool) != 0)
continue;
zpool_do_events_short(nvl, opts);
if (opts->verbose) {
zpool_do_events_nvprint(nvl, 8);
printf(gettext("\n"));
}
(void) fflush(stdout);
nvlist_free(nvl);
}
VERIFY(0 == close(zevent_fd));
return (ret);
}
static int
zpool_do_events_clear(ev_opts_t *opts)
{
int count, ret;
ret = zpool_events_clear(g_zfs, &count);
if (!ret)
(void) printf(gettext("cleared %d events\n"), count);
return (ret);
}
/*
* zpool events [-vHf [pool] | -c]
*
* Displays event logs generated by ZFS.
*/
int
zpool_do_events(int argc, char **argv)
{
ev_opts_t opts = { 0 };
int ret;
int c;
/* check options */
while ((c = getopt(argc, argv, "vHfc")) != -1) {
switch (c) {
case 'v':
opts.verbose = 1;
break;
case 'H':
opts.scripted = 1;
break;
case 'f':
opts.follow = 1;
break;
case 'c':
opts.clear = 1;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
} else if (argc == 1) {
(void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
(void) fprintf(stderr,
gettext("invalid pool name '%s'\n"), opts.poolname);
usage(B_FALSE);
}
}
if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
opts.clear) {
(void) fprintf(stderr,
gettext("invalid options combined with -c\n"));
usage(B_FALSE);
}
if (opts.clear)
ret = zpool_do_events_clear(&opts);
else
ret = zpool_do_events_next(&opts);
return (ret);
}
static int
get_callback(zpool_handle_t *zhp, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[MAXNAMELEN];
zprop_source_t srctype;
zprop_list_t *pl;
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* Skip the special fake placeholder. This will also skip
* over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL &&
(zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop))) {
srctype = ZPROP_SRC_LOCAL;
if (zpool_prop_get_feature(zhp, pl->pl_user_prop,
value, sizeof (value)) == 0) {
zprop_print_one_property(zpool_get_name(zhp),
cbp, pl->pl_user_prop, value, srctype,
NULL, NULL);
}
} else {
if (zpool_get_prop(zhp, pl->pl_prop, value,
sizeof (value), &srctype, cbp->cb_literal) != 0)
continue;
zprop_print_one_property(zpool_get_name(zhp), cbp,
zpool_prop_to_name(pl->pl_prop), value, srctype,
NULL, NULL);
}
}
return (0);
}
/*
* zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
*
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -o List of columns to display. Defaults to
* "name,property,value,source".
* -p Display values in parsable (exact) format.
*
* Get properties of pools in the system. Output space statistics
* for each one as well as other attributes.
*/
int
zpool_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
zprop_list_t fake_name = { 0 };
int ret;
int c, i;
char *value;
cb.cb_first = B_TRUE;
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_POOL;
/* check options */
while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'o':
bzero(&cb.cb_columns, sizeof (cb.cb_columns));
i = 0;
while (*optarg != '\0') {
static char *col_subopts[] =
{ "name", "property", "value", "source",
"all", NULL };
if (i == ZFS_GET_NCOLS) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
switch (getsubopt(&optarg, col_subopts,
&value)) {
case 0:
cb.cb_columns[i++] = GET_COL_NAME;
break;
case 1:
cb.cb_columns[i++] = GET_COL_PROPERTY;
break;
case 2:
cb.cb_columns[i++] = GET_COL_VALUE;
break;
case 3:
cb.cb_columns[i++] = GET_COL_SOURCE;
break;
case 4:
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
i = ZFS_GET_NCOLS;
break;
default:
(void) fprintf(stderr,
gettext("invalid column name "
"'%s'\n"), value);
usage(B_FALSE);
}
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
if (zprop_get_list(g_zfs, argv[0], &cb.cb_proplist,
ZFS_TYPE_POOL) != 0)
usage(B_FALSE);
argc--;
argv++;
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZPOOL_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_literal,
get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
return (ret);
}
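/*
 * Standalone sketch of the getsubopt(3) pattern used by the -o parsing
 * above: tokens are consumed one comma-separated element at a time and the
 * returned index selects the column. The token table mirrors the one above,
 * but the driver itself is illustrative only.
 */
static void
column_subopt_example(char *arg)
{
	static char *example_tokens[] =
	    { "name", "property", "value", "source", NULL };
	char *value;

	while (*arg != '\0') {
		int idx = getsubopt(&arg, example_tokens, &value);
		if (idx < 0)
			(void) fprintf(stderr,
			    "invalid column name '%s'\n", value);
		else
			(void) printf("column %d: %s\n",
			    idx, example_tokens[idx]);
	}
}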
typedef struct set_cbdata {
char *cb_propname;
char *cb_value;
boolean_t cb_any_successful;
} set_cbdata_t;
static int
set_callback(zpool_handle_t *zhp, void *data)
{
int error;
set_cbdata_t *cb = (set_cbdata_t *)data;
/* Check if we have out-of-bounds features */
if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(cb->cb_value, features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
nvlist_t *enabled = zpool_get_features(zhp);
spa_feature_t i;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
if (nvlist_exists(enabled, fguid) && !features[i])
break;
}
if (i < SPA_FEATURES)
(void) fprintf(stderr, gettext("Warning: one or "
"more features already enabled on pool '%s'\n"
"are not present in this compatibility set.\n"),
zpool_get_name(zhp));
}
/* if we're setting a feature, check it's in compatibility set */
if (zpool_prop_feature(cb->cb_propname) &&
strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
char *fname = strchr(cb->cb_propname, '@') + 1;
spa_feature_t f;
if (zfeature_lookup_name(fname, &f) == 0) {
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(compat, features) !=
ZPOOL_COMPATIBILITY_OK) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"because the pool's 'compatibility' "
"property cannot be parsed.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
if (!features[f]) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"as it is not specified in this pool's "
"current compatibility set.\n"
"Consider setting 'compatibility' to a "
"less restrictive set, or to 'off'.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
}
}
error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
if (!error)
cb->cb_any_successful = B_TRUE;
return (error);
}
int
zpool_do_set(int argc, char **argv)
{
set_cbdata_t cb = { 0 };
int error;
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing property=value "
"argument\n"));
usage(B_FALSE);
}
if (argc < 3) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 3) {
(void) fprintf(stderr, gettext("too many pool names\n"));
usage(B_FALSE);
}
cb.cb_propname = argv[1];
cb.cb_value = strchr(cb.cb_propname, '=');
if (cb.cb_value == NULL) {
(void) fprintf(stderr, gettext("missing value in "
"property=value argument\n"));
usage(B_FALSE);
}
*(cb.cb_value) = '\0';
cb.cb_value++;
error = for_each_pool(argc - 2, argv + 2, B_TRUE, NULL, B_FALSE,
set_callback, &cb);
return (error);
}
/* Add up the total number of bytes left to initialize/trim across all vdevs */
static uint64_t
vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
{
uint64_t bytes_remaining;
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
assert(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (activity == ZPOOL_WAIT_INITIALIZE &&
vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
bytes_remaining = vs->vs_initialize_bytes_est -
vs->vs_initialize_bytes_done;
else if (activity == ZPOOL_WAIT_TRIM &&
vs->vs_trim_state == VDEV_TRIM_ACTIVE)
bytes_remaining = vs->vs_trim_bytes_est -
vs->vs_trim_bytes_done;
else
bytes_remaining = 0;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++)
bytes_remaining += vdev_activity_remaining(child[c], activity);
return (bytes_remaining);
}
/* Add up the total number of bytes left to rebuild across top-level vdevs */
static uint64_t
vdev_activity_top_remaining(nvlist_t *nv)
{
uint64_t bytes_remaining = 0;
nvlist_t **child;
uint_t children;
int error;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
error = nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
if (error == 0) {
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
bytes_remaining += (vrs->vrs_bytes_est -
vrs->vrs_bytes_rebuilt);
}
}
}
return (bytes_remaining);
}
/* Whether any vdevs are 'spare' or 'replacing' vdevs */
static boolean_t
vdev_any_spare_replacing(nvlist_t *nv)
{
nvlist_t **child;
uint_t c, children;
char *vdev_type;
(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
return (B_TRUE);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++) {
if (vdev_any_spare_replacing(child[c]))
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct wait_data {
char *wd_poolname;
boolean_t wd_scripted;
boolean_t wd_exact;
boolean_t wd_headers_once;
boolean_t wd_should_exit;
/* Which activities to wait for */
boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
float wd_interval;
pthread_cond_t wd_cv;
pthread_mutex_t wd_mutex;
} wait_data_t;
/*
* Print to stdout a single line, containing one column for each activity that
* we are waiting for, specifying how many bytes of work are left for that
* activity.
*/
static void
print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
{
nvlist_t *config, *nvroot;
uint_t c;
int i;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *pss = NULL;
pool_removal_stat_t *prs = NULL;
char *headers[] = {"DISCARD", "FREE", "INITIALIZE", "REPLACE",
"REMOVE", "RESILVER", "SCRUB", "TRIM"};
int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
/* Calculate the width of each column */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
/*
* Make sure we have enough space in the col for pretty-printed
* numbers and for the column header, and then leave a couple
* spaces between cols for readability.
*/
col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
}
/* Print header if appropriate */
int term_height = terminal_height();
boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
row % (term_height-1) == 0);
if (!wd->wd_scripted && (row == 0 || reprint_header)) {
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
if (wd->wd_enabled[i])
(void) printf("%*s", col_widths[i], headers[i]);
}
(void) printf("\n");
}
/* Bytes of work remaining in each activity */
int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
bytes_rem[ZPOOL_WAIT_FREE] =
zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
if (prs != NULL && prs->prs_state == DSS_SCANNING)
bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
prs->prs_copied;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
if (pss != NULL && pss->pss_state == DSS_SCANNING &&
pss->pss_pass_scrub_pause == 0) {
int64_t rem = pss->pss_to_examine - pss->pss_issued;
if (pss->pss_func == POOL_SCAN_SCRUB)
bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
else
bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
} else if (check_rebuilding(nvroot, NULL)) {
bytes_rem[ZPOOL_WAIT_RESILVER] =
vdev_activity_top_remaining(nvroot);
}
bytes_rem[ZPOOL_WAIT_INITIALIZE] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
bytes_rem[ZPOOL_WAIT_TRIM] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
/*
* A replace finishes after resilvering finishes, so the amount of work
* left for a replace is the same as for resilvering.
*
* It isn't quite correct to say that if we have any 'spare' or
* 'replacing' vdevs and a resilver is happening, then a replace is in
* progress, like we do here. When a hot spare is used, the faulted vdev
* is not removed after the hot spare is resilvered, so the parent 'spare'
* vdev is not removed either. So we could have a 'spare' vdev, but be
* resilvering for a different reason. However, we use it as a heuristic
* because we don't have access to the DTLs, which could tell us whether
* or not we have really finished resilvering a hot spare.
*/
if (vdev_any_spare_replacing(nvroot))
bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
char buf[64];
if (!wd->wd_enabled[i])
continue;
if (wd->wd_exact)
(void) snprintf(buf, sizeof (buf), "%" PRIi64,
bytes_rem[i]);
else
zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
if (wd->wd_scripted)
(void) printf(i == 0 ? "%s" : "\t%s", buf);
else
(void) printf(" %*s", col_widths[i] - 1, buf);
}
(void) printf("\n");
(void) fflush(stdout);
}
static void *
wait_status_thread(void *arg)
{
wait_data_t *wd = (wait_data_t *)arg;
zpool_handle_t *zhp;
if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
return (void *)(1);
for (int row = 0; ; row++) {
boolean_t missing;
struct timespec timeout;
int ret = 0;
(void) clock_gettime(CLOCK_REALTIME, &timeout);
if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
zpool_props_refresh(zhp) != 0) {
zpool_close(zhp);
return (void *)(uintptr_t)(missing ? 0 : 1);
}
print_wait_status_row(wd, zhp, row);
timeout.tv_sec += floor(wd->wd_interval);
long nanos = timeout.tv_nsec +
(wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
if (nanos >= NANOSEC) {
timeout.tv_sec++;
timeout.tv_nsec = nanos - NANOSEC;
} else {
timeout.tv_nsec = nanos;
}
pthread_mutex_lock(&wd->wd_mutex);
if (!wd->wd_should_exit)
ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
&timeout);
pthread_mutex_unlock(&wd->wd_mutex);
if (ret == 0) {
break; /* signaled by main thread */
} else if (ret != ETIMEDOUT) {
(void) fprintf(stderr, gettext("pthread_cond_timedwait "
"failed: %s\n"), strerror(ret));
zpool_close(zhp);
return (void *)(uintptr_t)(1);
}
}
zpool_close(zhp);
return (void *)(0);
}
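/*
 * Minimal sketch of the interval handling in the loop above: the fractional
 * wd_interval (seconds) is folded into an absolute CLOCK_REALTIME deadline
 * for pthread_cond_timedwait(), keeping tv_nsec normalized to [0, NANOSEC).
 * Illustrative helper only.
 */
static void
interval_to_deadline_example(struct timespec *deadline, float interval)
{
	(void) clock_gettime(CLOCK_REALTIME, deadline);
	deadline->tv_sec += (time_t)floor(interval);
	long nanos = deadline->tv_nsec +
	    (long)((interval - floor(interval)) * NANOSEC);
	if (nanos >= NANOSEC) {
		deadline->tv_sec++;
		deadline->tv_nsec = nanos - NANOSEC;
	} else {
		deadline->tv_nsec = nanos;
	}
}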
int
zpool_do_wait(int argc, char **argv)
{
boolean_t verbose = B_FALSE;
int c;
char *value;
int i;
unsigned long count;
pthread_t status_thr;
int error = 0;
zpool_handle_t *zhp;
wait_data_t wd;
wd.wd_scripted = B_FALSE;
wd.wd_exact = B_FALSE;
wd.wd_headers_once = B_FALSE;
wd.wd_should_exit = B_FALSE;
pthread_mutex_init(&wd.wd_mutex, NULL);
pthread_cond_init(&wd.wd_cv, NULL);
/* By default, wait for all types of activity. */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
wd.wd_enabled[i] = B_TRUE;
while ((c = getopt(argc, argv, "HpT:t:")) != -1) {
switch (c) {
case 'H':
wd.wd_scripted = B_TRUE;
break;
case 'n':
wd.wd_headers_once = B_TRUE;
break;
case 'p':
wd.wd_exact = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 't':
{
static char *col_subopts[] = { "discard", "free",
"initialize", "replace", "remove", "resilver",
"scrub", "trim", NULL };
/* Reset activities array */
bzero(&wd.wd_enabled, sizeof (wd.wd_enabled));
while (*optarg != '\0') {
int activity = getsubopt(&optarg, col_subopts,
&value);
if (activity < 0) {
(void) fprintf(stderr,
gettext("invalid activity '%s'\n"),
value);
usage(B_FALSE);
}
wd.wd_enabled[activity] = B_TRUE;
}
break;
}
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &wd.wd_interval, &count);
if (count != 0) {
/* This subcmd only accepts an interval, not a count */
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (wd.wd_interval != 0)
verbose = B_TRUE;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
wd.wd_poolname = argv[0];
if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
return (1);
if (verbose) {
/*
* We use a separate thread for printing status updates because
* the main thread will call lzc_wait(), which blocks as long
* as an activity is in progress, which can be a long time.
*/
if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
!= 0) {
(void) fprintf(stderr, gettext("failed to create status"
"thread: %s\n"), strerror(errno));
zpool_close(zhp);
return (1);
}
}
/*
* Loop over all activities that we are supposed to wait for until none
* of them are in progress. Note that this means we can end up waiting
* for more activities to complete than just those that were in progress
* when we began waiting; if an activity we are interested in begins
* while we are waiting for another activity, we will wait for both to
* complete before exiting.
*/
for (;;) {
boolean_t missing = B_FALSE;
boolean_t any_waited = B_FALSE;
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
boolean_t waited;
if (!wd.wd_enabled[i])
continue;
error = zpool_wait_status(zhp, i, &missing, &waited);
if (error != 0 || missing)
break;
any_waited = (any_waited || waited);
}
if (error != 0 || missing || !any_waited)
break;
}
zpool_close(zhp);
if (verbose) {
uintptr_t status;
pthread_mutex_lock(&wd.wd_mutex);
wd.wd_should_exit = B_TRUE;
pthread_cond_signal(&wd.wd_cv);
pthread_mutex_unlock(&wd.wd_mutex);
(void) pthread_join(status_thr, (void *)&status);
if (status != 0)
error = status;
}
pthread_mutex_destroy(&wd.wd_mutex);
pthread_cond_destroy(&wd.wd_cv);
return (error);
}
static int
find_command_idx(char *command, int *idx)
{
int i;
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
/*
* Display version message
*/
static int
zpool_do_version(int argc, char **argv)
{
if (zfs_version_print() == -1)
return (1);
return (0);
}
/*
* Do zpool_load_compat() and print error message on failure
*/
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
char report[1024];
zpool_compat_status_t ret;
ret = zpool_load_compat(compat, list, report, 1024);
switch (ret) {
case ZPOOL_COMPATIBILITY_OK:
break;
case ZPOOL_COMPATIBILITY_NOFILES:
case ZPOOL_COMPATIBILITY_BADFILE:
case ZPOOL_COMPATIBILITY_BADTOKEN:
(void) fprintf(stderr, "Error: %s\n", report);
break;
case ZPOOL_COMPATIBILITY_WARNTOKEN:
(void) fprintf(stderr, "Warning: %s\n", report);
ret = ZPOOL_COMPATIBILITY_OK;
break;
}
return (ret);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
srand(time(NULL));
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
usage(B_TRUE);
/*
* Special case '-V|--version'
*/
if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
return (zpool_do_version(argc, argv));
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (1);
}
libzfs_print_on_error(g_zfs, B_TRUE);
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
/*
* Many commands modify input strings for string parsing reasons.
* We create a copy to protect the original argv.
*/
newargv = malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
} else if (strchr(cmdname, '=')) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
/*
* 'freeze' is a vile debugging abomination, so we treat
* it as such.
*/
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to freeze pool: %d\n"), errno);
ret = 1;
}
log_history = 0;
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_util.h b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
index 71db4dc35608..4002e5794021 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_util.h
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
@@ -1,139 +1,140 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#ifndef ZPOOL_UTIL_H
#define ZPOOL_UTIL_H
#include <libnvpair.h>
#include <libzfs.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Path to scripts you can run with "zpool status/iostat -c" */
#define ZPOOL_SCRIPTS_DIR SYSCONFDIR"/zfs/zpool.d"
/*
* Basic utility functions
*/
void *safe_malloc(size_t);
void *safe_realloc(void *, size_t);
void zpool_no_memory(void);
uint_t num_logs(nvlist_t *nv);
uint64_t array64_max(uint64_t array[], unsigned int len);
int highbit64(uint64_t i);
int lowbit64(uint64_t i);
/*
* Misc utility functions
*/
char *zpool_get_cmd_search_path(void);
/*
* Virtual device functions
*/
nvlist_t *make_root_vdev(zpool_handle_t *zhp, nvlist_t *props, int force,
int check_rep, boolean_t replacing, boolean_t dryrun, int argc,
char **argv);
nvlist_t *split_mirror_vdev(zpool_handle_t *zhp, char *newname,
nvlist_t *props, splitflags_t flags, int argc, char **argv);
/*
* Pool list functions
*/
int for_each_pool(int, char **, boolean_t unavail, zprop_list_t **,
boolean_t, zpool_iter_f, void *);
/* Vdev list functions */
typedef int (*pool_vdev_iter_f)(zpool_handle_t *, nvlist_t *, void *);
int for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data);
typedef struct zpool_list zpool_list_t;
zpool_list_t *pool_list_get(int, char **, zprop_list_t **, boolean_t, int *);
void pool_list_update(zpool_list_t *);
int pool_list_iter(zpool_list_t *, int unavail, zpool_iter_f, void *);
void pool_list_free(zpool_list_t *);
int pool_list_count(zpool_list_t *);
void pool_list_remove(zpool_list_t *, zpool_handle_t *);
extern libzfs_handle_t *g_zfs;
typedef struct vdev_cmd_data
{
char **lines; /* Array of lines of output, minus the column name */
int lines_cnt; /* Number of lines in the array */
char **cols; /* Array of column names */
int cols_cnt; /* Number of column names */
char *path; /* vdev path */
char *upath; /* vdev underlying path */
char *pool; /* Pool name */
char *cmd; /* backpointer to cmd */
char *vdev_enc_sysfs_path; /* enclosure sysfs path (if any) */
} vdev_cmd_data_t;
typedef struct vdev_cmd_data_list
{
char *cmd; /* Command to run */
unsigned int count; /* Number of vdev_cmd_data items (vdevs) */
/* fields used to select only certain vdevs, if requested */
libzfs_handle_t *g_zfs;
char **vdev_names;
int vdev_names_count;
int cb_name_flags;
vdev_cmd_data_t *data; /* Array of vdevs */
/* List of unique column names and widths */
char **uniq_cols;
int uniq_cols_cnt;
int *uniq_cols_width;
} vdev_cmd_data_list_t;
vdev_cmd_data_list_t *all_pools_for_each_vdev_run(int argc, char **argv,
char *cmd, libzfs_handle_t *g_zfs, char **vdev_names, int vdev_names_count,
int cb_name_flags);
void free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl);
int check_device(const char *path, boolean_t force,
boolean_t isspare, boolean_t iswholedisk);
boolean_t check_sector_size_database(char *path, int *sector_size);
-void vdev_error(const char *fmt, ...);
+void vdev_error(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
int check_file(const char *file, boolean_t force, boolean_t isspare);
void after_zpool_upgrade(zpool_handle_t *zhp);
+int check_file_generic(const char *file, boolean_t force, boolean_t isspare);
#ifdef __cplusplus
}
#endif
#endif /* ZPOOL_UTIL_H */
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
index 3d83da641ecb..dcc67e7e2014 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
@@ -1,1870 +1,1880 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
* Copyright (c) 2016, 2017 Intel Corporation.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
*/
/*
* Functions to convert between a list of vdevs and an nvlist representing the
* configuration. Each entry in the list can be one of:
*
* Device vdevs
* disk=(path=..., devid=...)
* file=(path=...)
*
* Group vdevs
* raidz[1|2]=(...)
* mirror=(...)
*
* Hot spares
*
* While the underlying implementation supports it, group vdevs cannot contain
* other group vdevs. All userland verification of devices is contained within
* this file. If successful, the nvlist returned can be passed directly to the
* kernel; we've done as much verification as possible in userland.
*
* Hot spares are a special case, and passed down as an array of disk vdevs, at
* the same level as the root of the vdev tree.
*
* The only function exported by this file is 'make_root_vdev'. The
* function performs several passes:
*
* 1. Construct the vdev specification. Performs syntax validation and
* makes sure each device is valid.
* 2. Check for devices in use. Uses libblkid to make sure that no
* devices are already in use. Some can be overridden using the 'force'
* flag, others cannot.
* 3. Check for replication errors if the 'force' flag is not specified.
* This validates that the replication level is consistent across the
* entire pool.
* 4. Call libzfs to label any whole disks with an EFI label.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <limits.h>
#include <sys/spa.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "zpool_util.h"
#include <sys/zfs_context.h>
#include <sys/stat.h>
/*
* For any given vdev specification, we can have multiple errors. The
* vdev_error() function keeps track of whether we have seen an error yet, and
* prints out a header if it's the first error we've seen.
*/
boolean_t error_seen;
boolean_t is_force;
-/*PRINTFLIKE1*/
void
vdev_error(const char *fmt, ...)
{
va_list ap;
if (!error_seen) {
(void) fprintf(stderr, gettext("invalid vdev specification\n"));
if (!is_force)
(void) fprintf(stderr, gettext("use '-f' to override "
"the following errors:\n"));
else
(void) fprintf(stderr, gettext("the following errors "
"must be manually repaired:\n"));
error_seen = B_TRUE;
}
va_start(ap, fmt);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
}
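/*
 * With the format(printf, 1, 2) attribute now attached to vdev_error() in
 * zpool_util.h, the compiler checks its arguments the same way it checks
 * printf(). For example (an illustrative call, not one from this file), a
 * mismatch such as
 *
 *	vdev_error(gettext("%s is part of pool '%s'\n"), file);
 *
 * can now be flagged by -Wformat because the second %s has no corresponding
 * argument.
 */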
/*
* Check that a file is valid. All we can do in this case is check that it's
* not in use by another pool, and not in use by swap.
*/
int
-check_file(const char *file, boolean_t force, boolean_t isspare)
+check_file_generic(const char *file, boolean_t force, boolean_t isspare)
{
char *name;
int fd;
int ret = 0;
pool_state_t state;
boolean_t inuse;
if ((fd = open(file, O_RDONLY)) < 0)
return (0);
if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) == 0 && inuse) {
const char *desc;
switch (state) {
case POOL_STATE_ACTIVE:
desc = gettext("active");
break;
case POOL_STATE_EXPORTED:
desc = gettext("exported");
break;
case POOL_STATE_POTENTIALLY_ACTIVE:
desc = gettext("potentially active");
break;
default:
desc = gettext("unknown");
break;
}
/*
* Allow hot spares to be shared between pools.
*/
if (state == POOL_STATE_SPARE && isspare) {
free(name);
(void) close(fd);
return (0);
}
if (state == POOL_STATE_ACTIVE ||
state == POOL_STATE_SPARE || !force) {
switch (state) {
case POOL_STATE_SPARE:
vdev_error(gettext("%s is reserved as a hot "
"spare for pool %s\n"), file, name);
break;
default:
vdev_error(gettext("%s is part of %s pool "
"'%s'\n"), file, desc, name);
break;
}
ret = -1;
}
free(name);
}
(void) close(fd);
return (ret);
}
/*
* This may be a shorthand device path or it could be total gibberish.
* Check to see if it is a known device available in zfs_vdev_paths.
* As part of this check, see if we've been given an entire disk
* (minus the slice number).
*/
static int
is_shorthand_path(const char *arg, char *path, size_t path_size,
struct stat64 *statbuf, boolean_t *wholedisk)
{
int error;
error = zfs_resolve_shortname(arg, path, path_size);
if (error == 0) {
*wholedisk = zfs_dev_is_whole_disk(path);
if (*wholedisk || (stat64(path, statbuf) == 0))
return (0);
}
strlcpy(path, arg, path_size);
memset(statbuf, 0, sizeof (*statbuf));
*wholedisk = B_FALSE;
return (error);
}
/*
* Determine if the given path is a hot spare within the given configuration.
* If no configuration is given we rely solely on the label.
*/
static boolean_t
is_spare(nvlist_t *config, const char *path)
{
int fd;
pool_state_t state;
char *name = NULL;
nvlist_t *label;
uint64_t guid, spareguid;
nvlist_t *nvroot;
nvlist_t **spares;
uint_t i, nspares;
boolean_t inuse;
if (zpool_is_draid_spare(path))
return (B_TRUE);
if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
return (B_FALSE);
if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 ||
!inuse ||
state != POOL_STATE_SPARE ||
zpool_read_label(fd, &label, NULL) != 0) {
free(name);
(void) close(fd);
return (B_FALSE);
}
free(name);
(void) close(fd);
if (config == NULL) {
nvlist_free(label);
return (B_TRUE);
}
verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0);
nvlist_free(label);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
for (i = 0; i < nspares; i++) {
verify(nvlist_lookup_uint64(spares[i],
ZPOOL_CONFIG_GUID, &spareguid) == 0);
if (spareguid == guid)
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Create a leaf vdev. Determine if this is a file or a device. If it's a
* device, fill in the device id to make a complete nvlist. Valid forms for a
* leaf vdev are:
*
* /dev/xxx Complete disk path
* /xxx Full path to file
* xxx Shorthand for <zfs_vdev_paths>/xxx
* draid* Virtual dRAID spare
*/
static nvlist_t *
make_leaf_vdev(nvlist_t *props, const char *arg, boolean_t is_primary)
{
char path[MAXPATHLEN];
struct stat64 statbuf;
nvlist_t *vdev = NULL;
char *type = NULL;
boolean_t wholedisk = B_FALSE;
uint64_t ashift = 0;
int err;
/*
* Determine what type of vdev this is, and put the full path into
* 'path'. We detect whether this is a device or a file afterwards by
* checking the st_mode of the file.
*/
if (arg[0] == '/') {
/*
* Complete device or file path. Exact type is determined by
* examining the file descriptor afterwards. Symbolic links
* are resolved to their real paths for the whole-disk and
* S_ISBLK/S_ISREG type checks. However, we are careful
* to store the given path as ZPOOL_CONFIG_PATH to ensure we
* can leverage udev's persistent device labels.
*/
if (realpath(arg, path) == NULL) {
(void) fprintf(stderr,
gettext("cannot resolve path '%s'\n"), arg);
return (NULL);
}
wholedisk = zfs_dev_is_whole_disk(path);
if (!wholedisk && (stat64(path, &statbuf) != 0)) {
(void) fprintf(stderr,
gettext("cannot open '%s': %s\n"),
path, strerror(errno));
return (NULL);
}
/* After whole disk check restore original passed path */
strlcpy(path, arg, sizeof (path));
} else if (zpool_is_draid_spare(arg)) {
if (!is_primary) {
(void) fprintf(stderr,
gettext("cannot open '%s': dRAID spares can only "
"be used to replace primary vdevs\n"), arg);
return (NULL);
}
wholedisk = B_TRUE;
strlcpy(path, arg, sizeof (path));
type = VDEV_TYPE_DRAID_SPARE;
} else {
err = is_shorthand_path(arg, path, sizeof (path),
&statbuf, &wholedisk);
if (err != 0) {
/*
* If we got ENOENT, then the user gave us
* gibberish, so try to direct them with a
* reasonable error message. Otherwise,
* regurgitate strerror() since it's the best we
* can do.
*/
if (err == ENOENT) {
(void) fprintf(stderr,
gettext("cannot open '%s': no such "
"device in %s\n"), arg, DISK_ROOT);
(void) fprintf(stderr,
gettext("must be a full path or "
"shorthand device name\n"));
return (NULL);
} else {
(void) fprintf(stderr,
gettext("cannot open '%s': %s\n"),
path, strerror(errno));
return (NULL);
}
}
}
if (type == NULL) {
/*
* Determine whether this is a device or a file.
*/
if (wholedisk || S_ISBLK(statbuf.st_mode)) {
type = VDEV_TYPE_DISK;
} else if (S_ISREG(statbuf.st_mode)) {
type = VDEV_TYPE_FILE;
} else {
fprintf(stderr, gettext("cannot use '%s': must "
"be a block device or regular file\n"), path);
return (NULL);
}
}
/*
* Finally, we have the complete device or file, and we know that it is
* acceptable to use. Construct the nvlist to describe this vdev. All
* vdevs have a 'path' element, and devices also have a 'devid' element.
*/
verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
if (strcmp(type, VDEV_TYPE_DISK) == 0)
verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
(uint64_t)wholedisk) == 0);
/*
* Override defaults if custom properties are provided.
*/
if (props != NULL) {
char *value = NULL;
if (nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ASHIFT), &value) == 0) {
if (zfs_nicestrtonum(NULL, value, &ashift) != 0) {
(void) fprintf(stderr,
gettext("ashift must be a number.\n"));
return (NULL);
}
if (ashift != 0 &&
(ashift < ASHIFT_MIN || ashift > ASHIFT_MAX)) {
(void) fprintf(stderr,
gettext("invalid 'ashift=%" PRIu64 "' "
"property: only values between %" PRId32 " "
"and %" PRId32 " are allowed.\n"),
ashift, ASHIFT_MIN, ASHIFT_MAX);
return (NULL);
}
}
}
/*
* If the device is known to incorrectly report its physical sector
* size explicitly provide the known correct value.
*/
if (ashift == 0) {
int sector_size;
if (check_sector_size_database(path, &sector_size) == B_TRUE)
ashift = highbit64(sector_size) - 1;
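/*
 * For example, a drive listed with a true 4096-byte sector size
 * yields highbit64(4096) - 1 == 12, i.e. the familiar ashift=12.
 */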
}
if (ashift > 0)
(void) nvlist_add_uint64(vdev, ZPOOL_CONFIG_ASHIFT, ashift);
return (vdev);
}
/*
* Go through and verify the replication level of the pool is consistent.
* Performs the following checks:
*
* For the new spec, verifies that devices in mirrors and raidz are the
* same size.
*
* If the current configuration already has inconsistent replication
* levels, ignore any other potential problems in the new spec.
*
* Otherwise, make sure that the current spec (if there is one) and the new
* spec have consistent replication levels.
*
* If there is no current spec (create), make sure new spec has at least
* one general purpose vdev.
*/
typedef struct replication_level {
char *zprl_type;
uint64_t zprl_children;
uint64_t zprl_parity;
} replication_level_t;
#define ZPOOL_FUZZ (16 * 1024 * 1024)
/*
* N.B. For the purposes of comparing replication levels dRAID can be
* considered functionally equivalent to raidz.
*/
static boolean_t
is_raidz_mirror(replication_level_t *a, replication_level_t *b,
replication_level_t **raidz, replication_level_t **mirror)
{
if ((strcmp(a->zprl_type, "raidz") == 0 ||
strcmp(a->zprl_type, "draid") == 0) &&
strcmp(b->zprl_type, "mirror") == 0) {
*raidz = a;
*mirror = b;
return (B_TRUE);
}
return (B_FALSE);
}
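/*
 * Example: a raidz2 top-level vdev and a 3-way mirror are treated as
 * compatible by the callers below, since both tolerate two device
 * failures (zprl_parity == 2 == zprl_children - 1).
 */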
/*
* Comparison for determining if dRAID and raidz were passed in either order.
*/
static boolean_t
is_raidz_draid(replication_level_t *a, replication_level_t *b)
{
if ((strcmp(a->zprl_type, "raidz") == 0 ||
strcmp(a->zprl_type, "draid") == 0) &&
(strcmp(b->zprl_type, "raidz") == 0 ||
strcmp(b->zprl_type, "draid") == 0)) {
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Given a list of toplevel vdevs, return the current replication level. If
* the config is inconsistent, then NULL is returned. If 'fatal' is set, then
* an error message will be displayed for each self-inconsistent vdev.
*/
static replication_level_t *
get_replication(nvlist_t *nvroot, boolean_t fatal)
{
nvlist_t **top;
uint_t t, toplevels;
nvlist_t **child;
uint_t c, children;
nvlist_t *nv;
char *type;
replication_level_t lastrep = {0};
replication_level_t rep;
replication_level_t *ret;
replication_level_t *raidz, *mirror;
boolean_t dontreport;
ret = safe_malloc(sizeof (replication_level_t));
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&top, &toplevels) == 0);
for (t = 0; t < toplevels; t++) {
uint64_t is_log = B_FALSE;
nv = top[t];
/*
* For separate logs we ignore the top level vdev replication
* constraints.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
if (is_log)
continue;
/* Ignore holes introduced by removing aux devices */
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_HOLE) == 0)
continue;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
/*
* This is a 'file' or 'disk' vdev.
*/
rep.zprl_type = type;
rep.zprl_children = 1;
rep.zprl_parity = 0;
} else {
int64_t vdev_size;
/*
* This is a mirror or RAID-Z vdev. Go through and make
* sure the contents are all the same (files vs. disks),
* keeping track of the number of elements in the
* process.
*
* We also check that the size of each vdev (if it can
* be determined) is the same.
*/
rep.zprl_type = type;
rep.zprl_children = 0;
if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
strcmp(type, VDEV_TYPE_DRAID) == 0) {
verify(nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY,
&rep.zprl_parity) == 0);
assert(rep.zprl_parity != 0);
} else {
rep.zprl_parity = 0;
}
/*
* The 'dontreport' variable indicates that we've
* already reported an error for this spec, so don't
* bother doing it again.
*/
type = NULL;
dontreport = 0;
vdev_size = -1LL;
for (c = 0; c < children; c++) {
nvlist_t *cnv = child[c];
char *path;
struct stat64 statbuf;
int64_t size = -1LL;
char *childtype;
int fd, err;
rep.zprl_children++;
verify(nvlist_lookup_string(cnv,
ZPOOL_CONFIG_TYPE, &childtype) == 0);
/*
* If this is a replacing or spare vdev, then
* get the real first child of the vdev: do this
* in a loop because replacing and spare vdevs
* can be nested.
*/
while (strcmp(childtype,
VDEV_TYPE_REPLACING) == 0 ||
strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
nvlist_t **rchild;
uint_t rchildren;
verify(nvlist_lookup_nvlist_array(cnv,
ZPOOL_CONFIG_CHILDREN, &rchild,
&rchildren) == 0);
assert(rchildren == 2);
cnv = rchild[0];
verify(nvlist_lookup_string(cnv,
ZPOOL_CONFIG_TYPE,
&childtype) == 0);
}
verify(nvlist_lookup_string(cnv,
ZPOOL_CONFIG_PATH, &path) == 0);
/*
* If we have a raidz/mirror that combines disks
* with files, report it as an error.
*/
if (!dontreport && type != NULL &&
strcmp(type, childtype) != 0) {
if (ret != NULL)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication "
"level: %s contains both "
"files and devices\n"),
rep.zprl_type);
else
return (NULL);
dontreport = B_TRUE;
}
/*
* According to stat(2), the value of 'st_size'
* is undefined for block devices and character
* devices. But there is no effective way to
* determine the real size in userland.
*
* Instead, we'll take advantage of an
* implementation detail of spec_size(). If the
* device is currently open, then we (should)
* return a valid size.
*
* If we still don't get a valid size (indicated
* by a size of 0 or MAXOFFSET_T), then ignore
* this device altogether.
*/
if ((fd = open(path, O_RDONLY)) >= 0) {
err = fstat64_blk(fd, &statbuf);
(void) close(fd);
} else {
err = stat64(path, &statbuf);
}
if (err != 0 ||
statbuf.st_size == 0 ||
statbuf.st_size == MAXOFFSET_T)
continue;
size = statbuf.st_size;
/*
* Also make sure that devices and
* slices have a consistent size. If
* they differ by a significant amount
* (~16MB) then report an error.
*/
if (!dontreport &&
(vdev_size != -1LL &&
(llabs(size - vdev_size) >
ZPOOL_FUZZ))) {
if (ret != NULL)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"%s contains devices of "
"different sizes\n"),
rep.zprl_type);
else
return (NULL);
dontreport = B_TRUE;
}
type = childtype;
vdev_size = size;
}
}
/*
* At this point, we have the replication of the last toplevel
* vdev in 'rep'. Compare it to 'lastrep' to see if it is
* different.
*/
if (lastrep.zprl_type != NULL) {
if (is_raidz_mirror(&lastrep, &rep, &raidz, &mirror) ||
is_raidz_mirror(&rep, &lastrep, &raidz, &mirror)) {
/*
* Accept raidz and mirror when they can
* handle the same number of disk failures.
*/
if (raidz->zprl_parity !=
mirror->zprl_children - 1) {
if (ret != NULL)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication "
"level: "
"%s and %s vdevs with "
"different redundancy, "
"%llu vs. %llu (%llu-way) "
"are present\n"),
raidz->zprl_type,
mirror->zprl_type,
+ (u_longlong_t)
raidz->zprl_parity,
+ (u_longlong_t)
mirror->zprl_children - 1,
+ (u_longlong_t)
mirror->zprl_children);
else
return (NULL);
}
} else if (is_raidz_draid(&lastrep, &rep)) {
/*
* Accept raidz and draid when they can
* handle the same number of disk failures.
*/
if (lastrep.zprl_parity != rep.zprl_parity) {
if (ret != NULL)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication "
"level: %s and %s vdevs "
"with different "
"redundancy, %llu vs. "
"%llu are present\n"),
lastrep.zprl_type,
rep.zprl_type,
+ (u_longlong_t)
lastrep.zprl_parity,
+ (u_longlong_t)
rep.zprl_parity);
else
return (NULL);
}
} else if (strcmp(lastrep.zprl_type, rep.zprl_type) !=
0) {
if (ret != NULL)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication level: "
"both %s and %s vdevs are "
"present\n"),
lastrep.zprl_type, rep.zprl_type);
else
return (NULL);
} else if (lastrep.zprl_parity != rep.zprl_parity) {
if (ret)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication level: "
"both %llu and %llu device parity "
"%s vdevs are present\n"),
+ (u_longlong_t)
lastrep.zprl_parity,
- rep.zprl_parity,
+ (u_longlong_t)rep.zprl_parity,
rep.zprl_type);
else
return (NULL);
} else if (lastrep.zprl_children != rep.zprl_children) {
if (ret)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication level: "
"both %llu-way and %llu-way %s "
"vdevs are present\n"),
+ (u_longlong_t)
lastrep.zprl_children,
+ (u_longlong_t)
rep.zprl_children,
rep.zprl_type);
else
return (NULL);
}
}
lastrep = rep;
}
if (ret != NULL)
*ret = rep;
return (ret);
}
/*
* Check the replication level of the vdev spec against the current pool. Calls
* get_replication() to make sure the new spec is self-consistent. If the pool
* has a consistent replication level, then we ignore any errors. Otherwise,
* report any difference between the two.
*/
static int
check_replication(nvlist_t *config, nvlist_t *newroot)
{
nvlist_t **child;
uint_t children;
replication_level_t *current = NULL, *new;
replication_level_t *raidz, *mirror;
int ret;
/*
* If we have a current pool configuration, check to see if it's
* self-consistent. If not, simply return success.
*/
if (config != NULL) {
nvlist_t *nvroot;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if ((current = get_replication(nvroot, B_FALSE)) == NULL)
return (0);
}
/*
* for spares there may be no children, and therefore no
* replication level to check
*/
if ((nvlist_lookup_nvlist_array(newroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) || (children == 0)) {
free(current);
return (0);
}
/*
* If all we have is logs then there's no replication level to check.
*/
if (num_logs(newroot) == children) {
free(current);
return (0);
}
/*
* Get the replication level of the new vdev spec, reporting any
* inconsistencies found.
*/
if ((new = get_replication(newroot, B_TRUE)) == NULL) {
free(current);
return (-1);
}
/*
* Check to see if the new vdev spec matches the replication level of
* the current pool.
*/
ret = 0;
if (current != NULL) {
if (is_raidz_mirror(current, new, &raidz, &mirror) ||
is_raidz_mirror(new, current, &raidz, &mirror)) {
if (raidz->zprl_parity != mirror->zprl_children - 1) {
vdev_error(gettext(
"mismatched replication level: pool and "
"new vdev with different redundancy, %s "
"and %s vdevs, %llu vs. %llu (%llu-way)\n"),
raidz->zprl_type,
mirror->zprl_type,
- raidz->zprl_parity,
- mirror->zprl_children - 1,
- mirror->zprl_children);
+ (u_longlong_t)raidz->zprl_parity,
+ (u_longlong_t)mirror->zprl_children - 1,
+ (u_longlong_t)mirror->zprl_children);
ret = -1;
}
} else if (strcmp(current->zprl_type, new->zprl_type) != 0) {
vdev_error(gettext(
"mismatched replication level: pool uses %s "
"and new vdev is %s\n"),
current->zprl_type, new->zprl_type);
ret = -1;
} else if (current->zprl_parity != new->zprl_parity) {
vdev_error(gettext(
"mismatched replication level: pool uses %llu "
"device parity and new vdev uses %llu\n"),
- current->zprl_parity, new->zprl_parity);
+ (u_longlong_t)current->zprl_parity,
+ (u_longlong_t)new->zprl_parity);
ret = -1;
} else if (current->zprl_children != new->zprl_children) {
vdev_error(gettext(
"mismatched replication level: pool uses %llu-way "
"%s and new vdev uses %llu-way %s\n"),
- current->zprl_children, current->zprl_type,
- new->zprl_children, new->zprl_type);
+ (u_longlong_t)current->zprl_children,
+ current->zprl_type,
+ (u_longlong_t)new->zprl_children,
+ new->zprl_type);
ret = -1;
}
}
free(new);
if (current != NULL)
free(current);
return (ret);
}
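/*
 * Example of the checks above: adding a 2-way mirror to a pool built
 * from raidz2 vdevs fails, because the raidz parity (2) does not match
 * the mirror's fault tolerance (children - 1 == 1).
 */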
static int
zero_label(char *path)
{
const int size = 4096;
char buf[size];
int err, fd;
if ((fd = open(path, O_WRONLY|O_EXCL)) < 0) {
(void) fprintf(stderr, gettext("cannot open '%s': %s\n"),
path, strerror(errno));
return (-1);
}
memset(buf, 0, size);
err = write(fd, buf, size);
(void) fdatasync(fd);
(void) close(fd);
if (err == -1) {
(void) fprintf(stderr, gettext("cannot zero first %d bytes "
"of '%s': %s\n"), size, path, strerror(errno));
return (-1);
}
if (err != size) {
(void) fprintf(stderr, gettext("could only zero %d/%d bytes "
"of '%s'\n"), err, size, path);
return (-1);
}
return (0);
}
/*
* Go through and find any whole disks in the vdev specification, labeling them
* as appropriate. When constructing the vdev spec, we were unable to open this
* device in order to provide a devid. Now that we have labeled the disk and
* know that slice 0 is valid, we can construct the devid now.
*
* If the disk was already labeled with an EFI label, we will have gotten the
* devid already (because we were able to open the whole disk). Otherwise, we
* need to get the devid after we label the disk.
*/
static int
make_disks(zpool_handle_t *zhp, nvlist_t *nv)
{
nvlist_t **child;
uint_t c, children;
char *type, *path;
char devpath[MAXPATHLEN];
char udevpath[MAXPATHLEN];
uint64_t wholedisk;
struct stat64 statbuf;
int is_exclusive = 0;
int fd;
int ret;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
if (strcmp(type, VDEV_TYPE_DISK) != 0)
return (0);
/*
* We have a disk device. If this is a whole disk write
* out the EFI partition table, otherwise write zeros to
* the first 4k of the partition. This is to ensure that
* libblkid will not misidentify the partition due to a
* magic value left by the previous filesystem.
*/
verify(!nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path));
verify(!nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk));
if (!wholedisk) {
/*
* Update device id string for mpath nodes (Linux only)
*/
if (is_mpath_whole_disk(path))
update_vdev_config_dev_strs(nv);
if (!is_spare(NULL, path))
(void) zero_label(path);
return (0);
}
if (realpath(path, devpath) == NULL) {
ret = errno;
(void) fprintf(stderr,
gettext("cannot resolve path '%s'\n"), path);
return (ret);
}
/*
* Remove any previously existing symlink from a udev path to
* the device before labeling the disk. This ensures that
* only newly created links are used. Otherwise there is a
* window between when udev deletes and recreates the link
* during which access attempts will fail with ENOENT.
*/
strlcpy(udevpath, path, MAXPATHLEN);
(void) zfs_append_partition(udevpath, MAXPATHLEN);
fd = open(devpath, O_RDWR|O_EXCL);
if (fd == -1) {
if (errno == EBUSY)
is_exclusive = 1;
#ifdef __FreeBSD__
if (errno == EPERM)
is_exclusive = 1;
#endif
} else {
(void) close(fd);
}
/*
* If the partition exists, contains a valid spare label,
* and is opened exclusively there is no need to partition
* it. Hot spares have already been partitioned and are
* held open exclusively by the kernel as a safety measure.
*
* If the provided path is for a /dev/disk/ device, its
* symbolic link will be removed and the partition table
* created; we then block until udev creates the new link.
*/
if (!is_exclusive && !is_spare(NULL, udevpath)) {
char *devnode = strrchr(devpath, '/') + 1;
ret = strncmp(udevpath, UDISK_ROOT, strlen(UDISK_ROOT));
if (ret == 0) {
ret = lstat64(udevpath, &statbuf);
if (ret == 0 && S_ISLNK(statbuf.st_mode))
(void) unlink(udevpath);
}
/*
* When labeling a pool the raw device node name
* is provided as it appears under /dev/.
*/
if (zpool_label_disk(g_zfs, zhp, devnode) == -1)
return (-1);
/*
* Wait for udev to signal the device is available
* by the provided path.
*/
ret = zpool_label_disk_wait(udevpath, DISK_LABEL_WAIT);
if (ret) {
(void) fprintf(stderr,
gettext("missing link: %s was "
"partitioned but %s is missing\n"),
devnode, udevpath);
return (ret);
}
ret = zero_label(udevpath);
if (ret)
return (ret);
}
/*
* Update the path to refer to the partition. The presence of
* the 'whole_disk' field indicates to the CLI that we should
* chop off the partition number when displaying the device in
* future output.
*/
verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, udevpath) == 0);
/*
* Update device id strings for whole disks (Linux only)
*/
update_vdev_config_dev_strs(nv);
return (0);
}
for (c = 0; c < children; c++)
if ((ret = make_disks(zhp, child[c])) != 0)
return (ret);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0)
for (c = 0; c < children; c++)
if ((ret = make_disks(zhp, child[c])) != 0)
return (ret);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0)
for (c = 0; c < children; c++)
if ((ret = make_disks(zhp, child[c])) != 0)
return (ret);
return (0);
}
/*
* Go through and find any devices that are in use. We rely on libdiskmgt for
* the majority of this task.
*/
static boolean_t
is_device_in_use(nvlist_t *config, nvlist_t *nv, boolean_t force,
boolean_t replacing, boolean_t isspare)
{
nvlist_t **child;
uint_t c, children;
char *type, *path;
int ret = 0;
char buf[MAXPATHLEN];
uint64_t wholedisk = B_FALSE;
boolean_t anyinuse = B_FALSE;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
verify(!nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path));
if (strcmp(type, VDEV_TYPE_DISK) == 0)
verify(!nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_WHOLE_DISK, &wholedisk));
/*
* As a generic check, we look to see if this is a replace of a
* hot spare within the same pool. If so, we allow it
* regardless of what libblkid or zpool_in_use() says.
*/
if (replacing) {
(void) strlcpy(buf, path, sizeof (buf));
if (wholedisk) {
ret = zfs_append_partition(buf, sizeof (buf));
if (ret == -1)
return (-1);
}
if (is_spare(config, buf))
return (B_FALSE);
}
if (strcmp(type, VDEV_TYPE_DISK) == 0)
ret = check_device(path, force, isspare, wholedisk);
else if (strcmp(type, VDEV_TYPE_FILE) == 0)
ret = check_file(path, force, isspare);
return (ret != 0);
}
for (c = 0; c < children; c++)
if (is_device_in_use(config, child[c], force, replacing,
B_FALSE))
anyinuse = B_TRUE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0)
for (c = 0; c < children; c++)
if (is_device_in_use(config, child[c], force, replacing,
B_TRUE))
anyinuse = B_TRUE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0)
for (c = 0; c < children; c++)
if (is_device_in_use(config, child[c], force, replacing,
B_FALSE))
anyinuse = B_TRUE;
return (anyinuse);
}
/*
* Returns the parity level extracted from a raidz or draid type.
* If the parity cannot be determined, zero is returned.
*/
static int
get_parity(const char *type)
{
long parity = 0;
const char *p;
if (strncmp(type, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0) {
p = type + strlen(VDEV_TYPE_RAIDZ);
if (*p == '\0') {
/* when unspecified default to single parity */
return (1);
} else if (*p == '0') {
/* no zero prefixes allowed */
return (0);
} else {
/* 1-3, no suffixes allowed */
char *end;
errno = 0;
parity = strtol(p, &end, 10);
if (errno != 0 || *end != '\0' ||
parity < 1 || parity > VDEV_RAIDZ_MAXPARITY) {
return (0);
}
}
} else if (strncmp(type, VDEV_TYPE_DRAID,
strlen(VDEV_TYPE_DRAID)) == 0) {
p = type + strlen(VDEV_TYPE_DRAID);
if (*p == '\0' || *p == ':') {
/* when unspecified default to single parity */
return (1);
} else if (*p == '0') {
/* no zero prefixes allowed */
return (0);
} else {
/* 1-3, allowed suffixes: '\0' or ':' */
char *end;
errno = 0;
parity = strtol(p, &end, 10);
if (errno != 0 ||
parity < 1 || parity > VDEV_DRAID_MAXPARITY ||
(*end != '\0' && *end != ':')) {
return (0);
}
}
}
return ((int)parity);
}
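/*
 * Examples: "raidz" and "draid" default to parity 1, "raidz3" returns 3,
 * and malformed values such as "raidz0" or "raidz03" return 0 (invalid).
 */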
/*
* Assign the minimum and maximum number of devices allowed for
* the specified type. On error NULL is returned, otherwise the
* type prefix is returned (raidz, mirror, etc).
*/
static const char *
is_grouping(const char *type, int *mindev, int *maxdev)
{
int nparity;
if (strncmp(type, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
strncmp(type, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0) {
nparity = get_parity(type);
if (nparity == 0)
return (NULL);
if (mindev != NULL)
*mindev = nparity + 1;
if (maxdev != NULL)
*maxdev = 255;
if (strncmp(type, VDEV_TYPE_RAIDZ,
strlen(VDEV_TYPE_RAIDZ)) == 0) {
return (VDEV_TYPE_RAIDZ);
} else {
return (VDEV_TYPE_DRAID);
}
}
if (maxdev != NULL)
*maxdev = INT_MAX;
if (strcmp(type, "mirror") == 0) {
if (mindev != NULL)
*mindev = 2;
return (VDEV_TYPE_MIRROR);
}
if (strcmp(type, "spare") == 0) {
if (mindev != NULL)
*mindev = 1;
return (VDEV_TYPE_SPARE);
}
if (strcmp(type, "log") == 0) {
if (mindev != NULL)
*mindev = 1;
return (VDEV_TYPE_LOG);
}
if (strcmp(type, VDEV_ALLOC_BIAS_SPECIAL) == 0 ||
strcmp(type, VDEV_ALLOC_BIAS_DEDUP) == 0) {
if (mindev != NULL)
*mindev = 1;
return (type);
}
if (strcmp(type, "cache") == 0) {
if (mindev != NULL)
*mindev = 1;
return (VDEV_TYPE_L2CACHE);
}
return (NULL);
}
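/*
 * Examples: is_grouping("mirror", ...) returns VDEV_TYPE_MIRROR with
 * mindev = 2 and maxdev = INT_MAX, while is_grouping("raidz2", ...)
 * returns VDEV_TYPE_RAIDZ with mindev = 3 and maxdev = 255. Plain
 * device names are not groupings and NULL is returned for them.
 */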
/*
* Extract the configuration parameters encoded in the dRAID type and
* use them to generate a dRAID configuration. The expected format is:
*
* draid[<parity>][:<data><d|D>][:<children><c|C>][:<spares><s|S>]
*
* The intent is to be able to generate a good configuration when no
* additional information is provided. The only mandatory component
* of the 'type' is the 'draid' prefix. If a value is not provided
* then reasonable defaults are used. The optional components may
* appear in any order but the d/s/c suffix is required.
*
* Valid inputs:
* - data: number of data devices per group (1-255)
* - parity: number of parity blocks per group (1-3)
* - spares: number of distributed spares (0-100)
* - children: total number of devices (1-255)
*
* Examples:
* - zpool create tank draid <devices...>
* - zpool create tank draid2:8d:51c:2s <devices...>
*/
static int
draid_config_by_type(nvlist_t *nv, const char *type, uint64_t children)
{
uint64_t nparity = 1;
uint64_t nspares = 0;
uint64_t ndata = UINT64_MAX;
uint64_t ngroups = 1;
long value;
if (strncmp(type, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) != 0)
return (EINVAL);
nparity = (uint64_t)get_parity(type);
if (nparity == 0)
return (EINVAL);
char *p = (char *)type;
while ((p = strchr(p, ':')) != NULL) {
char *end;
p = p + 1;
errno = 0;
if (!isdigit(p[0])) {
(void) fprintf(stderr, gettext("invalid dRAID "
"syntax; expected [:<number><c|d|s>] not '%s'\n"),
type);
return (EINVAL);
}
/* Expected non-zero value with c/d/s suffix */
value = strtol(p, &end, 10);
char suffix = tolower(*end);
if (errno != 0 ||
(suffix != 'c' && suffix != 'd' && suffix != 's')) {
(void) fprintf(stderr, gettext("invalid dRAID "
"syntax; expected [:<number><c|d|s>] not '%s'\n"),
type);
return (EINVAL);
}
if (suffix == 'c') {
if ((uint64_t)value != children) {
fprintf(stderr,
gettext("invalid number of dRAID children; "
"%llu required but %llu provided\n"),
(u_longlong_t)value,
(u_longlong_t)children);
return (EINVAL);
}
} else if (suffix == 'd') {
ndata = (uint64_t)value;
} else if (suffix == 's') {
nspares = (uint64_t)value;
} else {
verify(0); /* Unreachable */
}
}
/*
* When a specific number of data disks is not provided, limit a
* redundancy group to 8 data disks. This value was selected to
* provide a reasonable tradeoff between capacity and performance.
*/
if (ndata == UINT64_MAX) {
if (children > nspares + nparity) {
ndata = MIN(children - nspares - nparity, 8);
} else {
fprintf(stderr, gettext("request number of "
"distributed spares %llu and parity level %llu\n"
"leaves no disks available for data\n"),
(u_longlong_t)nspares, (u_longlong_t)nparity);
return (EINVAL);
}
}
/* Verify the maximum allowed group size is never exceeded. */
if (ndata == 0 || (ndata + nparity > children - nspares)) {
fprintf(stderr, gettext("requested number of dRAID data "
"disks per group %llu is too high,\nat most %llu disks "
"are available for data\n"), (u_longlong_t)ndata,
(u_longlong_t)(children - nspares - nparity));
return (EINVAL);
}
if (nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
fprintf(stderr,
gettext("invalid dRAID parity level %llu; must be "
"between 1 and %d\n"), (u_longlong_t)nparity,
VDEV_DRAID_MAXPARITY);
return (EINVAL);
}
/*
* Verify the requested number of spares can be satisfied.
* An arbitrary limit of 100 distributed spares is applied.
*/
if (nspares > 100 || nspares > (children - (ndata + nparity))) {
fprintf(stderr,
gettext("invalid number of dRAID spares %llu; additional "
"disks would be required\n"), (u_longlong_t)nspares);
return (EINVAL);
}
/* Verify the requested number of children is sufficient. */
if (children < (ndata + nparity + nspares)) {
fprintf(stderr, gettext("%llu disks were provided, but at "
"least %llu disks are required for this config\n"),
(u_longlong_t)children,
(u_longlong_t)(ndata + nparity + nspares));
}
if (children > VDEV_DRAID_MAX_CHILDREN) {
fprintf(stderr, gettext("%llu disks were provided, but "
"dRAID only supports up to %u disks"),
(u_longlong_t)children, VDEV_DRAID_MAX_CHILDREN);
}
/*
* Calculate the minimum number of groups required to fill a slice:
* ngroups is the smallest value for which ngroups * (ndata + nparity)
* is a multiple of the number of data drives (children - nspares),
* which works out to their LCM divided by the stripe width.
*/
while (ngroups * (ndata + nparity) % (children - nspares) != 0)
ngroups++;
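/*
 * Worked example for the "draid2:8d:51c:2s" spec from the comment
 * above: ndata + nparity = 10 and children - nspares = 49, so the
 * loop stops at ngroups = 49, the smallest group count for which
 * 10 * ngroups is a multiple of 49.
 */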
/* Store the basic dRAID configuration. */
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, nparity);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, ndata);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, nspares);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, ngroups);
return (0);
}
/*
* Construct a syntactically valid vdev specification,
* and ensure that all devices and files exist and can be opened.
* Note: we don't bother freeing anything in the error paths
* because the program is just going to exit anyway.
*/
static nvlist_t *
construct_spec(nvlist_t *props, int argc, char **argv)
{
nvlist_t *nvroot, *nv, **top, **spares, **l2cache;
int t, toplevels, mindev, maxdev, nspares, nlogs, nl2cache;
const char *type, *fulltype;
boolean_t is_log, is_special, is_dedup, is_spare;
boolean_t seen_logs;
top = NULL;
toplevels = 0;
spares = NULL;
l2cache = NULL;
nspares = 0;
nlogs = 0;
nl2cache = 0;
is_log = is_special = is_dedup = is_spare = B_FALSE;
seen_logs = B_FALSE;
nvroot = NULL;
while (argc > 0) {
fulltype = argv[0];
nv = NULL;
/*
* If it's a mirror, raidz, or draid the subsequent arguments
* are its leaves -- until we encounter the next mirror,
* raidz or draid.
*/
if ((type = is_grouping(fulltype, &mindev, &maxdev)) != NULL) {
nvlist_t **child = NULL;
int c, children = 0;
if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
if (spares != NULL) {
(void) fprintf(stderr,
gettext("invalid vdev "
"specification: 'spare' can be "
"specified only once\n"));
goto spec_out;
}
is_spare = B_TRUE;
is_log = is_special = is_dedup = B_FALSE;
}
if (strcmp(type, VDEV_TYPE_LOG) == 0) {
if (seen_logs) {
(void) fprintf(stderr,
gettext("invalid vdev "
"specification: 'log' can be "
"specified only once\n"));
goto spec_out;
}
seen_logs = B_TRUE;
is_log = B_TRUE;
is_special = is_dedup = is_spare = B_FALSE;
argc--;
argv++;
/*
* A log is not a real grouping device.
* We just set is_log and continue.
*/
continue;
}
if (strcmp(type, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
is_special = B_TRUE;
is_log = is_dedup = is_spare = B_FALSE;
argc--;
argv++;
continue;
}
if (strcmp(type, VDEV_ALLOC_BIAS_DEDUP) == 0) {
is_dedup = B_TRUE;
is_log = is_special = is_spare = B_FALSE;
argc--;
argv++;
continue;
}
if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
if (l2cache != NULL) {
(void) fprintf(stderr,
gettext("invalid vdev "
"specification: 'cache' can be "
"specified only once\n"));
goto spec_out;
}
is_log = is_special = B_FALSE;
is_dedup = is_spare = B_FALSE;
}
if (is_log || is_special || is_dedup) {
if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
(void) fprintf(stderr,
gettext("invalid vdev "
"specification: unsupported '%s' "
"device: %s\n"), is_log ? "log" :
"special", type);
goto spec_out;
}
nlogs++;
}
for (c = 1; c < argc; c++) {
if (is_grouping(argv[c], NULL, NULL) != NULL)
break;
children++;
child = realloc(child,
children * sizeof (nvlist_t *));
if (child == NULL)
zpool_no_memory();
if ((nv = make_leaf_vdev(props, argv[c],
!(is_log || is_special || is_dedup ||
is_spare))) == NULL) {
for (c = 0; c < children - 1; c++)
nvlist_free(child[c]);
free(child);
goto spec_out;
}
child[children - 1] = nv;
}
if (children < mindev) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: %s requires at least %d "
"devices\n"), argv[0], mindev);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
goto spec_out;
}
if (children > maxdev) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: %s supports no more than "
"%d devices\n"), argv[0], maxdev);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
goto spec_out;
}
argc -= c;
argv += c;
if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
spares = child;
nspares = children;
continue;
} else if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
l2cache = child;
nl2cache = children;
continue;
} else {
/* create a top-level vdev with children */
verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
0) == 0);
verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
type) == 0);
verify(nvlist_add_uint64(nv,
ZPOOL_CONFIG_IS_LOG, is_log) == 0);
if (is_log) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_LOG) == 0);
}
if (is_special) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_SPECIAL) == 0);
}
if (is_dedup) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_DEDUP) == 0);
}
if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
verify(nvlist_add_uint64(nv,
ZPOOL_CONFIG_NPARITY,
mindev - 1) == 0);
}
if (strcmp(type, VDEV_TYPE_DRAID) == 0) {
if (draid_config_by_type(nv,
fulltype, children) != 0) {
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
goto spec_out;
}
}
verify(nvlist_add_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, child,
children) == 0);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
}
} else {
/*
* We have a device. Pass off to make_leaf_vdev() to
* construct the appropriate nvlist describing the vdev.
*/
if ((nv = make_leaf_vdev(props, argv[0], !(is_log ||
is_special || is_dedup || is_spare))) == NULL)
goto spec_out;
verify(nvlist_add_uint64(nv,
ZPOOL_CONFIG_IS_LOG, is_log) == 0);
if (is_log) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_LOG) == 0);
nlogs++;
}
if (is_special) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_SPECIAL) == 0);
}
if (is_dedup) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_DEDUP) == 0);
}
argc--;
argv++;
}
toplevels++;
top = realloc(top, toplevels * sizeof (nvlist_t *));
if (top == NULL)
zpool_no_memory();
top[toplevels - 1] = nv;
}
if (toplevels == 0 && nspares == 0 && nl2cache == 0) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: at least one toplevel vdev must be "
"specified\n"));
goto spec_out;
}
if (seen_logs && nlogs == 0) {
(void) fprintf(stderr, gettext("invalid vdev specification: "
"log requires at least 1 device\n"));
goto spec_out;
}
/*
* Finally, create nvroot and add all top-level vdevs to it.
*/
verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) == 0);
verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
top, toplevels) == 0);
if (nspares != 0)
verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
spares, nspares) == 0);
if (nl2cache != 0)
verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
l2cache, nl2cache) == 0);
spec_out:
for (t = 0; t < toplevels; t++)
nvlist_free(top[t]);
for (t = 0; t < nspares; t++)
nvlist_free(spares[t]);
for (t = 0; t < nl2cache; t++)
nvlist_free(l2cache[t]);
free(spares);
free(l2cache);
free(top);
return (nvroot);
}
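/*
 * Example (hypothetical device names): the arguments
 * "mirror sda sdb log sdc cache sdd spare sde" produce an nvroot with
 * one mirror top-level vdev, a single log leaf vdev flagged with
 * ZPOOL_CONFIG_IS_LOG, one l2cache entry, and one spare entry.
 */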
nvlist_t *
split_mirror_vdev(zpool_handle_t *zhp, char *newname, nvlist_t *props,
splitflags_t flags, int argc, char **argv)
{
nvlist_t *newroot = NULL, **child;
uint_t c, children;
if (argc > 0) {
if ((newroot = construct_spec(props, argc, argv)) == NULL) {
(void) fprintf(stderr, gettext("Unable to build a "
"pool from the specified devices\n"));
return (NULL);
}
if (!flags.dryrun && make_disks(zhp, newroot) != 0) {
nvlist_free(newroot);
return (NULL);
}
/* avoid any tricks in the spec */
verify(nvlist_lookup_nvlist_array(newroot,
ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
for (c = 0; c < children; c++) {
char *path;
const char *type;
int min, max;
verify(nvlist_lookup_string(child[c],
ZPOOL_CONFIG_PATH, &path) == 0);
if ((type = is_grouping(path, &min, &max)) != NULL) {
(void) fprintf(stderr, gettext("Cannot use "
"'%s' as a device for splitting\n"), type);
nvlist_free(newroot);
return (NULL);
}
}
}
if (zpool_vdev_split(zhp, newname, &newroot, props, flags) != 0) {
nvlist_free(newroot);
return (NULL);
}
return (newroot);
}
static int
num_normal_vdevs(nvlist_t *nvroot)
{
nvlist_t **top;
uint_t t, toplevels, normal = 0;
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&top, &toplevels) == 0);
for (t = 0; t < toplevels; t++) {
uint64_t log = B_FALSE;
(void) nvlist_lookup_uint64(top[t], ZPOOL_CONFIG_IS_LOG, &log);
if (log)
continue;
if (nvlist_exists(top[t], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
normal++;
}
return (normal);
}
/*
* Get and validate the contents of the given vdev specification. This ensures
* that the nvlist returned is well-formed, that all the devices exist, and that
* they are not currently in use by any other known consumer. The 'poolconfig'
* parameter is the current configuration of the pool when adding devices to an
* existing pool, and is used to perform additional checks, such as changing the
* replication level of the pool. It can be 'NULL' to indicate that this is a
* new pool. The 'force' flag controls whether devices should be forcefully
* added, even if they appear in use.
*/
nvlist_t *
make_root_vdev(zpool_handle_t *zhp, nvlist_t *props, int force, int check_rep,
boolean_t replacing, boolean_t dryrun, int argc, char **argv)
{
nvlist_t *newroot;
nvlist_t *poolconfig = NULL;
is_force = force;
/*
* Construct the vdev specification. If this is successful, we know
* that we have a valid specification, and that all devices can be
* opened.
*/
if ((newroot = construct_spec(props, argc, argv)) == NULL)
return (NULL);
if (zhp && ((poolconfig = zpool_get_config(zhp, NULL)) == NULL)) {
nvlist_free(newroot);
return (NULL);
}
/*
* Validate each device to make sure that it's not shared with another
* subsystem. We do this even if 'force' is set, because there are some
* uses (such as a dedicated dump device) that even '-f' cannot
* override.
*/
if (is_device_in_use(poolconfig, newroot, force, replacing, B_FALSE)) {
nvlist_free(newroot);
return (NULL);
}
/*
* Check the replication level of the given vdevs and report any errors
* found. We include the existing pool spec, if any, as we need to
* catch changes against the existing replication level.
*/
if (check_rep && check_replication(poolconfig, newroot) != 0) {
nvlist_free(newroot);
return (NULL);
}
/*
* On pool create the new vdev spec must have one normal vdev.
*/
if (poolconfig == NULL && num_normal_vdevs(newroot) == 0) {
vdev_error(gettext("at least one general top-level vdev must "
"be specified\n"));
nvlist_free(newroot);
return (NULL);
}
/*
* Run through the vdev specification and label any whole disks found.
*/
if (!dryrun && make_disks(zhp, newroot) != 0) {
nvlist_free(newroot);
return (NULL);
}
return (newroot);
}
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c b/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c
index b60d18ee955c..8fc8e7717c53 100644
--- a/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c
@@ -1,843 +1,844 @@
/*
* Gather top-level ZFS pool and resilver/scan statistics and print using
* influxdb line protocol
* usage: [options] [pool_name]
* where options are:
* --execd, -e run in telegraf execd input plugin mode, [CR] on
* stdin causes a sample to be printed, then waits for
* the next [CR]
* --no-histograms, -n don't print histogram data (reduces cardinality
* if you don't care about histograms)
* --sum-histogram-buckets, -s sum histogram bucket values
*
* To integrate into telegraf use one of:
* 1. the `inputs.execd` plugin with the `--execd` option
* 2. the `inputs.exec` plugin to simply run with no options
*
* NOTE: libzfs is an unstable interface. YMMV.
*
* The design goals of this software include:
* + be as lightweight as possible
* + reduce the number of external dependencies as far as possible, hence
* there is no dependency on a client library for managing the metric
* collection -- info is printed, KISS
* + broken pools or kernel bugs can cause this process to hang in an
* unkillable state. For this reason, it is best to keep the damage limited
* to a small process like zpool_influxdb rather than a larger collector.
*
* Copyright 2018-2020 Richard Elling
*
* This software is dual-licensed MIT and CDDL.
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*
* See the License for the specific language governing permissions
* and limitations under the License.
*
* CDDL HEADER END
*/
#include <string.h>
#include <getopt.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <libzfs.h>
#define POOL_MEASUREMENT "zpool_stats"
#define SCAN_MEASUREMENT "zpool_scan_stats"
#define VDEV_MEASUREMENT "zpool_vdev_stats"
#define POOL_LATENCY_MEASUREMENT "zpool_latency"
#define POOL_QUEUE_MEASUREMENT "zpool_vdev_queue"
#define MIN_LAT_INDEX 10 /* minimum latency index 10 = 1024ns */
#define POOL_IO_SIZE_MEASUREMENT "zpool_io_size"
#define MIN_SIZE_INDEX 9 /* minimum size index 9 = 512 bytes */
/* global options */
int execd_mode = 0;
int no_histograms = 0;
int sum_histogram_buckets = 0;
char metric_data_type = 'u';
uint64_t metric_value_mask = UINT64_MAX;
uint64_t timestamp = 0;
int complained_about_sync = 0;
char *tags = "";
typedef int (*stat_printer_f)(nvlist_t *, const char *, const char *);
/*
* influxdb line protocol rules for escaping are important because the
* zpool name can include characters that need to be escaped
*
* caller is responsible for freeing result
*/
static char *
escape_string(const char *s)
{
const char *c;
char *d;
char *t = (char *)malloc(ZFS_MAX_DATASET_NAME_LEN * 2);
if (t == NULL) {
fprintf(stderr, "error: cannot allocate memory\n");
exit(1);
}
for (c = s, d = t; *c != '\0'; c++, d++) {
switch (*c) {
case ' ':
case ',':
case '=':
case '\\':
*d++ = '\\';
+ /* FALLTHROUGH */
default:
*d = *c;
}
}
*d = '\0';
return (t);
}
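/*
 * For example, escape_string("my pool,rack=a") returns
 * "my\ pool\,rack\=a", which is safe to embed in a tag or
 * measurement name.
 */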
/*
* print key=value where value is a uint64_t
*/
static void
print_kv(char *key, uint64_t value)
{
printf("%s=%llu%c", key,
(u_longlong_t)value & metric_value_mask, metric_data_type);
}
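/*
 * For example, print_kv("alloc", 1234) emits "alloc=1234u" by default,
 * or "alloc=1234i" (masked to INT64_MAX) when --signed-int is given.
 */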
/*
* print_scan_status() prints the details as often seen in the "zpool status"
* output. However, unlike the zpool command, which is intended for humans,
* this output is suitable for long-term tracking in influxdb.
* TODO: update to include issued scan data
*/
static int
print_scan_status(nvlist_t *nvroot, const char *pool_name)
{
uint_t c;
int64_t elapsed;
uint64_t examined, pass_exam, paused_time, paused_ts, rate;
uint64_t remaining_time;
pool_scan_stat_t *ps = NULL;
double pct_done;
char *state[DSS_NUM_STATES] = {
"none", "scanning", "finished", "canceled"};
char *func;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
/*
* ignore if there are no stats
*/
if (ps == NULL)
return (0);
/*
* return error if state is bogus
*/
if (ps->pss_state >= DSS_NUM_STATES ||
ps->pss_func >= POOL_SCAN_FUNCS) {
if (complained_about_sync % 1000 == 0) {
fprintf(stderr, "error: cannot decode scan stats: "
"ZFS is out of sync with compiled zpool_influxdb");
complained_about_sync++;
}
return (1);
}
switch (ps->pss_func) {
case POOL_SCAN_NONE:
func = "none_requested";
break;
case POOL_SCAN_SCRUB:
func = "scrub";
break;
case POOL_SCAN_RESILVER:
func = "resilver";
break;
#ifdef POOL_SCAN_REBUILD
case POOL_SCAN_REBUILD:
func = "rebuild";
break;
#endif
default:
func = "scan";
}
/* overall progress */
examined = ps->pss_examined ? ps->pss_examined : 1;
pct_done = 0.0;
if (ps->pss_to_examine > 0)
pct_done = 100.0 * examined / ps->pss_to_examine;
#ifdef EZFS_SCRUB_PAUSED
paused_ts = ps->pss_pass_scrub_pause;
paused_time = ps->pss_pass_scrub_spent_paused;
#else
paused_ts = 0;
paused_time = 0;
#endif
/* calculations for this pass */
if (ps->pss_state == DSS_SCANNING) {
elapsed = (int64_t)time(NULL) - (int64_t)ps->pss_pass_start -
(int64_t)paused_time;
elapsed = (elapsed > 0) ? elapsed : 1;
pass_exam = ps->pss_pass_exam ? ps->pss_pass_exam : 1;
rate = pass_exam / elapsed;
rate = (rate > 0) ? rate : 1;
remaining_time = ps->pss_to_examine - examined / rate;
} else {
elapsed =
(int64_t)ps->pss_end_time - (int64_t)ps->pss_pass_start -
(int64_t)paused_time;
elapsed = (elapsed > 0) ? elapsed : 1;
pass_exam = ps->pss_pass_exam ? ps->pss_pass_exam : 1;
rate = pass_exam / elapsed;
remaining_time = 0;
}
rate = rate ? rate : 1;
/* influxdb line protocol format: "tags metrics timestamp" */
printf("%s%s,function=%s,name=%s,state=%s ",
SCAN_MEASUREMENT, tags, func, pool_name, state[ps->pss_state]);
print_kv("end_ts", ps->pss_end_time);
print_kv(",errors", ps->pss_errors);
print_kv(",examined", examined);
print_kv(",issued", ps->pss_issued);
print_kv(",pass_examined", pass_exam);
print_kv(",pass_issued", ps->pss_pass_issued);
print_kv(",paused_ts", paused_ts);
print_kv(",paused_t", paused_time);
printf(",pct_done=%.2f", pct_done);
print_kv(",processed", ps->pss_processed);
print_kv(",rate", rate);
print_kv(",remaining_t", remaining_time);
print_kv(",start_ts", ps->pss_start_time);
print_kv(",to_examine", ps->pss_to_examine);
print_kv(",to_process", ps->pss_to_process);
printf(" %llu\n", (u_longlong_t)timestamp);
return (0);
}
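/*
 * Illustrative output (hypothetical pool "tank", abbreviated fields):
 * zpool_scan_stats,function=scrub,name=tank,state=scanning
 *     end_ts=0u,errors=0u,examined=1u,...,to_process=0u 1605025160000000000
 */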
/*
* get a vdev name that corresponds to the top-level vdev names
* printed by `zpool status`
*/
static char *
get_vdev_name(nvlist_t *nvroot, const char *parent_name)
{
static char vdev_name[256];
char *vdev_type = NULL;
uint64_t vdev_id = 0;
if (nvlist_lookup_string(nvroot, ZPOOL_CONFIG_TYPE,
&vdev_type) != 0) {
vdev_type = "unknown";
}
if (nvlist_lookup_uint64(
nvroot, ZPOOL_CONFIG_ID, &vdev_id) != 0) {
vdev_id = UINT64_MAX;
}
if (parent_name == NULL) {
(void) snprintf(vdev_name, sizeof (vdev_name), "%s",
vdev_type);
} else {
(void) snprintf(vdev_name, sizeof (vdev_name),
"%s/%s-%llu",
parent_name, vdev_type, (u_longlong_t)vdev_id);
}
return (vdev_name);
}
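/*
 * Examples: the root vdev (parent_name == NULL) is named after its
 * type, e.g. "root", while its first raidz child becomes "root/raidz-0".
 */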
/*
* get a string suitable for an influxdb tag that describes this vdev
*
* By default only the vdev hierarchical name is shown, separated by '/'
* If the vdev has an associated path, which is typical of leaf vdevs,
* then the path is added.
* It would be nice to have the devid instead of the path, but under
* Linux we cannot be sure a devid will exist and we'd rather have
* something than nothing, so we'll use path instead.
*/
static char *
get_vdev_desc(nvlist_t *nvroot, const char *parent_name)
{
static char vdev_desc[2 * MAXPATHLEN];
char *vdev_type = NULL;
uint64_t vdev_id = 0;
char vdev_value[MAXPATHLEN];
char *vdev_path = NULL;
char *s, *t;
if (nvlist_lookup_string(nvroot, ZPOOL_CONFIG_TYPE, &vdev_type) != 0) {
vdev_type = "unknown";
}
if (nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_ID, &vdev_id) != 0) {
vdev_id = UINT64_MAX;
}
if (nvlist_lookup_string(
nvroot, ZPOOL_CONFIG_PATH, &vdev_path) != 0) {
vdev_path = NULL;
}
if (parent_name == NULL) {
s = escape_string(vdev_type);
(void) snprintf(vdev_value, sizeof (vdev_value), "vdev=%s", s);
free(s);
} else {
s = escape_string((char *)parent_name);
t = escape_string(vdev_type);
(void) snprintf(vdev_value, sizeof (vdev_value),
"vdev=%s/%s-%llu", s, t, (u_longlong_t)vdev_id);
free(s);
free(t);
}
if (vdev_path == NULL) {
(void) snprintf(vdev_desc, sizeof (vdev_desc), "%s",
vdev_value);
} else {
s = escape_string(vdev_path);
(void) snprintf(vdev_desc, sizeof (vdev_desc), "path=%s,%s",
s, vdev_value);
free(s);
}
return (vdev_desc);
}
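/*
 * Example (hypothetical device path): a leaf disk under the first
 * mirror yields something like "path=/dev/sda1,vdev=root/mirror-0/disk-0",
 * whereas interior vdevs without a path omit the "path=" component.
 */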
/*
* vdev summary stats are a combination of the data shown by
* `zpool status` and `zpool list -v`
*/
static int
print_summary_stats(nvlist_t *nvroot, const char *pool_name,
const char *parent_name)
{
uint_t c;
vdev_stat_t *vs;
char *vdev_desc = NULL;
vdev_desc = get_vdev_desc(nvroot, parent_name);
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) != 0) {
return (1);
}
printf("%s%s,name=%s,state=%s,%s ", POOL_MEASUREMENT, tags,
pool_name, zpool_state_to_name((vdev_state_t)vs->vs_state,
(vdev_aux_t)vs->vs_aux), vdev_desc);
print_kv("alloc", vs->vs_alloc);
print_kv(",free", vs->vs_space - vs->vs_alloc);
print_kv(",size", vs->vs_space);
print_kv(",read_bytes", vs->vs_bytes[ZIO_TYPE_READ]);
print_kv(",read_errors", vs->vs_read_errors);
print_kv(",read_ops", vs->vs_ops[ZIO_TYPE_READ]);
print_kv(",write_bytes", vs->vs_bytes[ZIO_TYPE_WRITE]);
print_kv(",write_errors", vs->vs_write_errors);
print_kv(",write_ops", vs->vs_ops[ZIO_TYPE_WRITE]);
print_kv(",checksum_errors", vs->vs_checksum_errors);
print_kv(",fragmentation", vs->vs_fragmentation);
printf(" %llu\n", (u_longlong_t)timestamp);
return (0);
}
/*
* vdev latency stats are histograms stored as nvlist arrays of uint64.
* Latency stats include the ZIO scheduler classes plus lower-level
* vdev latencies.
*
* In many cases, the top-level "root" view obscures the underlying
* top-level vdev operations. For example, if a pool has a log, special,
* or cache device, then each can behave very differently. It is useful
* to see how each is responding.
*/
static int
print_vdev_latency_stats(nvlist_t *nvroot, const char *pool_name,
const char *parent_name)
{
uint_t c, end = 0;
nvlist_t *nv_ex;
char *vdev_desc = NULL;
/* short_names become part of the metric name and are influxdb-ready */
struct lat_lookup {
char *name;
char *short_name;
uint64_t sum;
uint64_t *array;
};
struct lat_lookup lat_type[] = {
{ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, "total_read", 0},
{ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, "total_write", 0},
{ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, "disk_read", 0},
{ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, "disk_write", 0},
{ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO, "sync_read", 0},
{ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO, "sync_write", 0},
{ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO, "async_read", 0},
{ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO, "async_write", 0},
{ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO, "scrub", 0},
#ifdef ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO
{ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, "trim", 0},
#endif
{NULL, NULL}
};
if (nvlist_lookup_nvlist(nvroot,
ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
return (6);
}
vdev_desc = get_vdev_desc(nvroot, parent_name);
for (int i = 0; lat_type[i].name; i++) {
if (nvlist_lookup_uint64_array(nv_ex,
lat_type[i].name, &lat_type[i].array, &c) != 0) {
fprintf(stderr, "error: can't get %s\n",
lat_type[i].name);
return (3);
}
/* record the last bucket index; all of the arrays are the same size */
end = c - 1;
}
for (int bucket = 0; bucket <= end; bucket++) {
if (bucket < MIN_LAT_INDEX) {
/* don't print, but collect the sum */
for (int i = 0; lat_type[i].name; i++) {
lat_type[i].sum += lat_type[i].array[bucket];
}
continue;
}
if (bucket < end) {
printf("%s%s,le=%0.6f,name=%s,%s ",
POOL_LATENCY_MEASUREMENT, tags,
(float)(1ULL << bucket) * 1e-9,
pool_name, vdev_desc);
} else {
printf("%s%s,le=+Inf,name=%s,%s ",
POOL_LATENCY_MEASUREMENT, tags, pool_name,
vdev_desc);
}
for (int i = 0; lat_type[i].name; i++) {
if (bucket <= MIN_LAT_INDEX || sum_histogram_buckets) {
lat_type[i].sum += lat_type[i].array[bucket];
} else {
lat_type[i].sum = lat_type[i].array[bucket];
}
print_kv(lat_type[i].short_name, lat_type[i].sum);
if (lat_type[i + 1].name != NULL) {
printf(",");
}
}
printf(" %llu\n", (u_longlong_t)timestamp);
}
return (0);
}
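/*
 * Bucket-to-tag mapping used above: le = (1 << bucket) nanoseconds
 * expressed in seconds, so the first printed bucket (MIN_LAT_INDEX == 10)
 * appears as le=0.000001 (about 1.024 us) and the last bucket is
 * reported as le=+Inf.
 */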
/*
* vdev request size stats are histograms stored as nvlist arrays of uint64.
* Request size stats include the ZIO scheduler classes plus lower-level
* vdev sizes. Both independent (ind) and aggregated (agg) sizes are reported.
*
* In many cases, the top-level "root" view obscures the underlying
* top-level vdev operations. For example, if a pool has a log, special,
* or cache device, then each can behave very differently. It is useful
* to see how each is responding.
*/
static int
print_vdev_size_stats(nvlist_t *nvroot, const char *pool_name,
const char *parent_name)
{
uint_t c, end = 0;
nvlist_t *nv_ex;
char *vdev_desc = NULL;
/* short_names become the field name */
struct size_lookup {
char *name;
char *short_name;
uint64_t sum;
uint64_t *array;
};
struct size_lookup size_type[] = {
{ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO, "sync_read_ind"},
{ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO, "sync_write_ind"},
{ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO, "async_read_ind"},
{ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO, "async_write_ind"},
{ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO, "scrub_read_ind"},
{ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO, "sync_read_agg"},
{ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO, "sync_write_agg"},
{ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO, "async_read_agg"},
{ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO, "async_write_agg"},
{ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO, "scrub_read_agg"},
#ifdef ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO
{ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO, "trim_write_ind"},
{ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO, "trim_write_agg"},
#endif
{NULL, NULL}
};
if (nvlist_lookup_nvlist(nvroot,
ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
return (6);
}
vdev_desc = get_vdev_desc(nvroot, parent_name);
for (int i = 0; size_type[i].name; i++) {
if (nvlist_lookup_uint64_array(nv_ex, size_type[i].name,
&size_type[i].array, &c) != 0) {
fprintf(stderr, "error: can't get %s\n",
size_type[i].name);
return (3);
}
/* record the last bucket index; all of the arrays are the same size */
end = c - 1;
}
for (int bucket = 0; bucket <= end; bucket++) {
if (bucket < MIN_SIZE_INDEX) {
/* don't print, but collect the sum */
for (int i = 0; size_type[i].name; i++) {
size_type[i].sum += size_type[i].array[bucket];
}
continue;
}
if (bucket < end) {
printf("%s%s,le=%llu,name=%s,%s ",
POOL_IO_SIZE_MEASUREMENT, tags, 1ULL << bucket,
pool_name, vdev_desc);
} else {
printf("%s%s,le=+Inf,name=%s,%s ",
POOL_IO_SIZE_MEASUREMENT, tags, pool_name,
vdev_desc);
}
for (int i = 0; size_type[i].name; i++) {
if (bucket <= MIN_SIZE_INDEX || sum_histogram_buckets) {
size_type[i].sum += size_type[i].array[bucket];
} else {
size_type[i].sum = size_type[i].array[bucket];
}
print_kv(size_type[i].short_name, size_type[i].sum);
if (size_type[i + 1].name != NULL) {
printf(",");
}
}
printf(" %llu\n", (u_longlong_t)timestamp);
}
return (0);
}
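/*
 * Here the le tag is an I/O size in bytes: the first printed bucket
 * (MIN_SIZE_INDEX == 9) is le=512, the next le=1024, and the final
 * bucket is reported as le=+Inf.
 */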
/*
* ZIO scheduler queue stats are stored as gauges. This is unfortunate
* because the values can change very rapidly and any point-in-time
* value will quickly be obsoleted. It is also not easy to downsample.
* Thus only the top-level queue stats might be beneficial... maybe.
*/
static int
print_queue_stats(nvlist_t *nvroot, const char *pool_name,
const char *parent_name)
{
nvlist_t *nv_ex;
uint64_t value;
/* short_names are used for the field name */
struct queue_lookup {
char *name;
char *short_name;
};
struct queue_lookup queue_type[] = {
{ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, "sync_r_active"},
{ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, "sync_w_active"},
{ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, "async_r_active"},
{ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, "async_w_active"},
{ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, "async_scrub_active"},
{ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, "sync_r_pend"},
{ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, "sync_w_pend"},
{ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, "async_r_pend"},
{ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, "async_w_pend"},
{ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, "async_scrub_pend"},
{NULL, NULL}
};
if (nvlist_lookup_nvlist(nvroot,
ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
return (6);
}
printf("%s%s,name=%s,%s ", POOL_QUEUE_MEASUREMENT, tags, pool_name,
get_vdev_desc(nvroot, parent_name));
for (int i = 0; queue_type[i].name; i++) {
if (nvlist_lookup_uint64(nv_ex,
queue_type[i].name, &value) != 0) {
fprintf(stderr, "error: can't get %s\n",
queue_type[i].name);
return (3);
}
print_kv(queue_type[i].short_name, value);
if (queue_type[i + 1].name != NULL) {
printf(",");
}
}
printf(" %llu\n", (u_longlong_t)timestamp);
return (0);
}
/*
* top-level vdev stats are at the pool level
*/
static int
print_top_level_vdev_stats(nvlist_t *nvroot, const char *pool_name)
{
nvlist_t *nv_ex;
uint64_t value;
/* short_names become part of the metric name */
struct queue_lookup {
char *name;
char *short_name;
};
struct queue_lookup queue_type[] = {
{ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, "sync_r_active_queue"},
{ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, "sync_w_active_queue"},
{ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, "async_r_active_queue"},
{ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, "async_w_active_queue"},
{ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, "async_scrub_active_queue"},
{ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, "sync_r_pend_queue"},
{ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, "sync_w_pend_queue"},
{ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, "async_r_pend_queue"},
{ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, "async_w_pend_queue"},
{ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, "async_scrub_pend_queue"},
{NULL, NULL}
};
if (nvlist_lookup_nvlist(nvroot,
ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
return (6);
}
printf("%s%s,name=%s,vdev=root ", VDEV_MEASUREMENT, tags,
pool_name);
for (int i = 0; queue_type[i].name; i++) {
if (nvlist_lookup_uint64(nv_ex,
queue_type[i].name, &value) != 0) {
fprintf(stderr, "error: can't get %s\n",
queue_type[i].name);
return (3);
}
if (i > 0)
printf(",");
print_kv(queue_type[i].short_name, value);
}
printf(" %llu\n", (u_longlong_t)timestamp);
return (0);
}
/*
* recursive stats printer
*/
static int
print_recursive_stats(stat_printer_f func, nvlist_t *nvroot,
const char *pool_name, const char *parent_name, int descend)
{
uint_t c, children;
nvlist_t **child;
char vdev_name[256];
int err;
err = func(nvroot, pool_name, parent_name);
if (err)
return (err);
if (descend && nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
(void) strlcpy(vdev_name, get_vdev_name(nvroot, parent_name),
sizeof (vdev_name));
for (c = 0; c < children; c++) {
print_recursive_stats(func, child[c], pool_name,
vdev_name, descend);
}
}
return (0);
}
/*
* call-back to print the stats from the pool config
*
* Note: if the pool is broken, this can hang indefinitely and perhaps in an
* unkillable state.
*/
static int
print_stats(zpool_handle_t *zhp, void *data)
{
uint_t c;
int err;
boolean_t missing;
nvlist_t *config, *nvroot;
vdev_stat_t *vs;
struct timespec tv;
char *pool_name;
/* if not this pool return quickly */
if (data &&
strncmp(data, zpool_get_name(zhp), ZFS_MAX_DATASET_NAME_LEN) != 0) {
zpool_close(zhp);
return (0);
}
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (1);
}
config = zpool_get_config(zhp, NULL);
if (clock_gettime(CLOCK_REALTIME, &tv) != 0)
timestamp = (uint64_t)time(NULL) * 1000000000;
else
timestamp =
((uint64_t)tv.tv_sec * 1000000000) + (uint64_t)tv.tv_nsec;
if (nvlist_lookup_nvlist(
config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0) {
zpool_close(zhp);
return (2);
}
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) != 0) {
zpool_close(zhp);
return (3);
}
pool_name = escape_string(zpool_get_name(zhp));
err = print_recursive_stats(print_summary_stats, nvroot,
pool_name, NULL, 1);
/* if any of these return an error, skip the rest */
if (err == 0)
err = print_top_level_vdev_stats(nvroot, pool_name);
if (no_histograms == 0) {
if (err == 0)
err = print_recursive_stats(print_vdev_latency_stats, nvroot,
pool_name, NULL, 1);
if (err == 0)
err = print_recursive_stats(print_vdev_size_stats, nvroot,
pool_name, NULL, 1);
if (err == 0)
err = print_recursive_stats(print_queue_stats, nvroot,
pool_name, NULL, 0);
}
if (err == 0)
err = print_scan_status(nvroot, pool_name);
free(pool_name);
zpool_close(zhp);
return (err);
}
static void
usage(char *name)
{
fprintf(stderr, "usage: %s [--execd][--no-histograms]"
"[--sum-histogram-buckets] [--signed-int] [poolname]\n", name);
exit(EXIT_FAILURE);
}
int
main(int argc, char *argv[])
{
int opt;
int ret = 8;
char *line = NULL;
size_t len = 0, tagslen = 0;	/* getline() expects an initialized size */
struct option long_options[] = {
{"execd", no_argument, NULL, 'e'},
{"help", no_argument, NULL, 'h'},
{"no-histograms", no_argument, NULL, 'n'},
{"signed-int", no_argument, NULL, 'i'},
{"sum-histogram-buckets", no_argument, NULL, 's'},
{"tags", required_argument, NULL, 't'},
{0, 0, 0, 0}
};
while ((opt = getopt_long(
argc, argv, "ehinst:", long_options, NULL)) != -1) {
switch (opt) {
case 'e':
execd_mode = 1;
break;
case 'i':
metric_data_type = 'i';
metric_value_mask = INT64_MAX;
break;
case 'n':
no_histograms = 1;
break;
case 's':
sum_histogram_buckets = 1;
break;
case 't':
tagslen = strlen(optarg) + 2;
tags = calloc(tagslen, 1);
if (tags == NULL) {
fprintf(stderr,
"error: cannot allocate memory "
"for tags\n");
exit(1);
}
(void) snprintf(tags, tagslen, ",%s", optarg);
break;
default:
usage(argv[0]);
}
}
libzfs_handle_t *g_zfs;
if ((g_zfs = libzfs_init()) == NULL) {
fprintf(stderr,
"error: cannot initialize libzfs. "
"Is the zfs module loaded or zrepl running?\n");
exit(EXIT_FAILURE);
}
if (execd_mode == 0) {
ret = zpool_iter(g_zfs, print_stats, argv[optind]);
return (ret);
}
while (getline(&line, &len, stdin) != -1) {
ret = zpool_iter(g_zfs, print_stats, argv[optind]);
fflush(stdout);
}
return (ret);
}
diff --git a/sys/contrib/openzfs/cmd/ztest/ztest.c b/sys/contrib/openzfs/cmd/ztest/ztest.c
index a580396ebd8a..5a5c381409a2 100644
--- a/sys/contrib/openzfs/cmd/ztest/ztest.c
+++ b/sys/contrib/openzfs/cmd/ztest/ztest.c
@@ -1,8182 +1,8202 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
*/
/*
* The objective of this program is to provide a DMU/ZAP/SPA stress test
* that runs entirely in userland, is easy to use, and easy to extend.
*
* The overall design of the ztest program is as follows:
*
* (1) For each major functional area (e.g. adding vdevs to a pool,
* creating and destroying datasets, reading and writing objects, etc)
* we have a simple routine to test that functionality. These
* individual routines do not have to do anything "stressful".
*
* (2) We turn these simple functionality tests into a stress test by
* running them all in parallel, with as many threads as desired,
* and spread across as many datasets, objects, and vdevs as desired.
*
* (3) While all this is happening, we inject faults into the pool to
* verify that self-healing data really works.
*
* (4) Every time we open a dataset, we change its checksum and compression
* functions. Thus even individual objects vary from block to block
* in which checksum they use and whether they're compressed.
*
* (5) To verify that we never lose on-disk consistency after a crash,
* we run the entire test in a child of the main process.
* At random times, the child self-immolates with a SIGKILL.
* This is the software equivalent of pulling the power cord.
* The parent then runs the test again, using the existing
* storage pool, as many times as desired. If backwards compatibility
* testing is enabled, ztest will sometimes run the "older" version
* of ztest after a SIGKILL.
*
* (6) To verify that we don't have future leaks or temporal incursions,
* many of the functional tests record the transaction group number
* as part of their data. When reading old data, they verify that
* the transaction group number is less than the current, open txg.
* If you add a new test, please do this if applicable.
*
* (7) Threads are created with a reduced stack size, for sanity checking.
* Therefore, it's important not to allocate huge buffers on the stack.
*
* When run with no arguments, ztest runs for about five minutes and
* produces no output if successful. To get a little bit of information,
* specify -V. To get more information, specify -VV, and so on.
*
* To turn this into an overnight stress test, use -T to specify run time.
*
* You can ask for more vdevs [-v], datasets [-d], or threads [-t]
* to increase the pool capacity, fanout, and overall stress level.
*
* Use the -k option to set the desired frequency of kills.
*
* When ztest invokes itself it passes all relevant information through a
* temporary file which is mmap-ed in the child process. This allows shared
* memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
* stored at offset 0 of this file and contains information on the size and
* number of shared structures in the file. The information stored in this file
* must remain backwards compatible with older versions of ztest so that
* ztest can invoke them during backwards compatibility testing (-B).
*/
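/*
 * Illustrative sketch only (not part of ztest): the "shared memory that
 * survives exec" described above can be built from a plain file and a
 * MAP_SHARED mapping.  The parent sizes the file and maps it; the child,
 * after exec, re-maps the same file and sees the parent's writes.  The
 * helper below is hypothetical and is not compiled.
 */
#if 0
static void *
example_shared_map(const char *path, size_t size)
{
	int fd = open(path, O_RDWR | O_CREAT, 0600);
	if (fd == -1)
		return (NULL);
	if (ftruncate(fd, (off_t)size) != 0) {
		(void) close(fd);
		return (NULL);
	}
	/*
	 * Writes through this mapping are visible to any process that
	 * maps the same file with MAP_SHARED.
	 */
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	(void) close(fd);
	return (p == MAP_FAILED ? NULL : p);
}
#endif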
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_trim.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_refcount.h>
#include <sys/zfeature.h>
#include <sys/dsl_userhold.h>
#include <sys/abd.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <signal.h>
#include <umem.h>
#include <ctype.h>
#include <math.h>
#include <sys/fs/zfs.h>
#include <zfs_fletcher.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <sys/crypto/icp.h>
#if (__GLIBC__ && !__UCLIBC__)
#include <execinfo.h> /* for backtrace() */
#endif
static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;
typedef struct ztest_shared_hdr {
uint64_t zh_hdr_size;
uint64_t zh_opts_size;
uint64_t zh_size;
uint64_t zh_stats_size;
uint64_t zh_stats_count;
uint64_t zh_ds_size;
uint64_t zh_ds_count;
} ztest_shared_hdr_t;
static ztest_shared_hdr_t *ztest_shared_hdr;
enum ztest_class_state {
ZTEST_VDEV_CLASS_OFF,
ZTEST_VDEV_CLASS_ON,
ZTEST_VDEV_CLASS_RND
};
#define ZO_GVARS_MAX_ARGLEN ((size_t)64)
#define ZO_GVARS_MAX_COUNT ((size_t)10)
typedef struct ztest_shared_opts {
char zo_pool[ZFS_MAX_DATASET_NAME_LEN];
char zo_dir[ZFS_MAX_DATASET_NAME_LEN];
char zo_alt_ztest[MAXNAMELEN];
char zo_alt_libpath[MAXNAMELEN];
uint64_t zo_vdevs;
uint64_t zo_vdevtime;
size_t zo_vdev_size;
int zo_ashift;
int zo_mirrors;
int zo_raid_children;
int zo_raid_parity;
char zo_raid_type[8];
int zo_draid_data;
int zo_draid_spares;
int zo_datasets;
int zo_threads;
uint64_t zo_passtime;
uint64_t zo_killrate;
int zo_verbose;
int zo_init;
uint64_t zo_time;
uint64_t zo_maxloops;
uint64_t zo_metaslab_force_ganging;
int zo_mmp_test;
int zo_special_vdevs;
int zo_dump_dbgmsg;
int zo_gvars_count;
char zo_gvars[ZO_GVARS_MAX_COUNT][ZO_GVARS_MAX_ARGLEN];
} ztest_shared_opts_t;
/* Default values for command line options. */
#define DEFAULT_POOL "ztest"
#define DEFAULT_VDEV_DIR "/tmp"
#define DEFAULT_VDEV_COUNT 5
#define DEFAULT_VDEV_SIZE (SPA_MINDEVSIZE * 4) /* 256m default size */
#define DEFAULT_VDEV_SIZE_STR "256M"
#define DEFAULT_ASHIFT SPA_MINBLOCKSHIFT
#define DEFAULT_MIRRORS 2
#define DEFAULT_RAID_CHILDREN 4
#define DEFAULT_RAID_PARITY 1
#define DEFAULT_DRAID_DATA 4
#define DEFAULT_DRAID_SPARES 1
#define DEFAULT_DATASETS_COUNT 7
#define DEFAULT_THREADS 23
#define DEFAULT_RUN_TIME 300 /* 300 seconds */
#define DEFAULT_RUN_TIME_STR "300 sec"
#define DEFAULT_PASS_TIME 60 /* 60 seconds */
#define DEFAULT_PASS_TIME_STR "60 sec"
#define DEFAULT_KILL_RATE 70 /* 70% kill rate */
#define DEFAULT_KILLRATE_STR "70%"
#define DEFAULT_INITS 1
#define DEFAULT_MAX_LOOPS 50 /* 5 minutes */
#define DEFAULT_FORCE_GANGING (64 << 10)
#define DEFAULT_FORCE_GANGING_STR "64K"
/* Simplifying assumption: -1 is not a valid default. */
#define NO_DEFAULT -1
static const ztest_shared_opts_t ztest_opts_defaults = {
.zo_pool = DEFAULT_POOL,
.zo_dir = DEFAULT_VDEV_DIR,
.zo_alt_ztest = { '\0' },
.zo_alt_libpath = { '\0' },
.zo_vdevs = DEFAULT_VDEV_COUNT,
.zo_ashift = DEFAULT_ASHIFT,
.zo_mirrors = DEFAULT_MIRRORS,
.zo_raid_children = DEFAULT_RAID_CHILDREN,
.zo_raid_parity = DEFAULT_RAID_PARITY,
.zo_raid_type = VDEV_TYPE_RAIDZ,
.zo_vdev_size = DEFAULT_VDEV_SIZE,
.zo_draid_data = DEFAULT_DRAID_DATA, /* data drives */
.zo_draid_spares = DEFAULT_DRAID_SPARES, /* distributed spares */
.zo_datasets = DEFAULT_DATASETS_COUNT,
.zo_threads = DEFAULT_THREADS,
.zo_passtime = DEFAULT_PASS_TIME,
.zo_killrate = DEFAULT_KILL_RATE,
.zo_verbose = 0,
.zo_mmp_test = 0,
.zo_init = DEFAULT_INITS,
.zo_time = DEFAULT_RUN_TIME,
.zo_maxloops = DEFAULT_MAX_LOOPS, /* max loops during spa_freeze() */
.zo_metaslab_force_ganging = DEFAULT_FORCE_GANGING,
.zo_special_vdevs = ZTEST_VDEV_CLASS_RND,
.zo_gvars_count = 0,
};
extern uint64_t metaslab_force_ganging;
extern uint64_t metaslab_df_alloc_threshold;
extern unsigned long zfs_deadman_synctime_ms;
extern int metaslab_preload_limit;
extern boolean_t zfs_compressed_arc_enabled;
extern int zfs_abd_scatter_enabled;
extern int dmu_object_alloc_chunk_shift;
extern boolean_t zfs_force_some_double_word_sm_entries;
extern unsigned long zio_decompress_fail_fraction;
extern unsigned long zfs_reconstruct_indirect_damage_fraction;
static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;
static char *ztest_wkeydata = "abcdefghijklmnopqrstuvwxyz012345";
typedef struct ztest_shared_ds {
uint64_t zd_seq;
} ztest_shared_ds_t;
static ztest_shared_ds_t *ztest_shared_ds;
#define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
#define BT_MAGIC 0x123456789abcdefULL
#define MAXFAULTS(zs) \
(MAX((zs)->zs_mirrors, 1) * (ztest_opts.zo_raid_parity + 1) - 1)
enum ztest_io_type {
ZTEST_IO_WRITE_TAG,
ZTEST_IO_WRITE_PATTERN,
ZTEST_IO_WRITE_ZEROES,
ZTEST_IO_TRUNCATE,
ZTEST_IO_SETATTR,
ZTEST_IO_REWRITE,
ZTEST_IO_TYPES
};
typedef struct ztest_block_tag {
uint64_t bt_magic;
uint64_t bt_objset;
uint64_t bt_object;
uint64_t bt_dnodesize;
uint64_t bt_offset;
uint64_t bt_gen;
uint64_t bt_txg;
uint64_t bt_crtxg;
} ztest_block_tag_t;
typedef struct bufwad {
uint64_t bw_index;
uint64_t bw_txg;
uint64_t bw_data;
} bufwad_t;
/*
* It would be better to use a rangelock_t per object. Unfortunately
* the rangelock_t is not a drop-in replacement for rl_t, because we
* still need to map from object ID to rangelock_t.
*/
typedef enum {
RL_READER,
RL_WRITER,
RL_APPEND
} rl_type_t;
typedef struct rll {
void *rll_writer;
int rll_readers;
kmutex_t rll_lock;
kcondvar_t rll_cv;
} rll_t;
typedef struct rl {
uint64_t rl_object;
uint64_t rl_offset;
uint64_t rl_size;
rll_t *rl_lock;
} rl_t;
#define ZTEST_RANGE_LOCKS 64
#define ZTEST_OBJECT_LOCKS 64
/*
* Object descriptor. Used as a template for object lookup/create/remove.
*/
typedef struct ztest_od {
uint64_t od_dir;
uint64_t od_object;
dmu_object_type_t od_type;
dmu_object_type_t od_crtype;
uint64_t od_blocksize;
uint64_t od_crblocksize;
uint64_t od_crdnodesize;
uint64_t od_gen;
uint64_t od_crgen;
char od_name[ZFS_MAX_DATASET_NAME_LEN];
} ztest_od_t;
/*
* Per-dataset state.
*/
typedef struct ztest_ds {
ztest_shared_ds_t *zd_shared;
objset_t *zd_os;
pthread_rwlock_t zd_zilog_lock;
zilog_t *zd_zilog;
ztest_od_t *zd_od; /* debugging aid */
char zd_name[ZFS_MAX_DATASET_NAME_LEN];
kmutex_t zd_dirobj_lock;
rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;
/*
* Per-iteration state.
*/
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
typedef struct ztest_info {
ztest_func_t *zi_func; /* test function */
uint64_t zi_iters; /* iterations per execution */
uint64_t *zi_interval; /* execute every <interval> seconds */
const char *zi_funcname; /* name of test function */
} ztest_info_t;
typedef struct ztest_shared_callstate {
uint64_t zc_count; /* per-pass count */
uint64_t zc_time; /* per-pass time */
uint64_t zc_next; /* next time to call this function */
} ztest_shared_callstate_t;
static ztest_shared_callstate_t *ztest_shared_callstate;
#define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_object_next_chunk;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_mmp_enable_disable;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_class_add;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;
ztest_func_t ztest_device_removal;
ztest_func_t ztest_spa_checkpoint_create_discard;
ztest_func_t ztest_initialize;
ztest_func_t ztest_trim;
ztest_func_t ztest_fletcher;
ztest_func_t ztest_fletcher_incr;
ztest_func_t ztest_verify_dnode_bt;
uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
#define ZTI_INIT(func, iters, interval) \
{ .zi_func = (func), \
.zi_iters = (iters), \
.zi_interval = (interval), \
.zi_funcname = # func }
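/*
 * For illustration, ZTI_INIT(ztest_zap, 30, &zopt_always) expands to a
 * designated initializer equivalent to:
 *
 *	{ .zi_func = ztest_zap, .zi_iters = 30,
 *	    .zi_interval = &zopt_always, .zi_funcname = "ztest_zap" }
 *
 * The "# func" stringification is what supplies zi_funcname.
 */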
ztest_info_t ztest_info[] = {
ZTI_INIT(ztest_dmu_read_write, 1, &zopt_always),
ZTI_INIT(ztest_dmu_write_parallel, 10, &zopt_always),
ZTI_INIT(ztest_dmu_object_alloc_free, 1, &zopt_always),
ZTI_INIT(ztest_dmu_object_next_chunk, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_commit_callbacks, 1, &zopt_always),
ZTI_INIT(ztest_zap, 30, &zopt_always),
ZTI_INIT(ztest_zap_parallel, 100, &zopt_always),
ZTI_INIT(ztest_split_pool, 1, &zopt_always),
ZTI_INIT(ztest_zil_commit, 1, &zopt_incessant),
ZTI_INIT(ztest_zil_remount, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_read_write_zcopy, 1, &zopt_often),
ZTI_INIT(ztest_dmu_objset_create_destroy, 1, &zopt_often),
ZTI_INIT(ztest_dsl_prop_get_set, 1, &zopt_often),
ZTI_INIT(ztest_spa_prop_get_set, 1, &zopt_sometimes),
#if 0
ZTI_INIT(ztest_dmu_prealloc, 1, &zopt_sometimes),
#endif
ZTI_INIT(ztest_fzap, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes),
ZTI_INIT(ztest_spa_create_destroy, 1, &zopt_sometimes),
ZTI_INIT(ztest_fault_inject, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_snapshot_hold, 1, &zopt_sometimes),
ZTI_INIT(ztest_mmp_enable_disable, 1, &zopt_sometimes),
ZTI_INIT(ztest_reguid, 1, &zopt_rarely),
ZTI_INIT(ztest_scrub, 1, &zopt_rarely),
ZTI_INIT(ztest_spa_upgrade, 1, &zopt_rarely),
ZTI_INIT(ztest_dsl_dataset_promote_busy, 1, &zopt_rarely),
ZTI_INIT(ztest_vdev_attach_detach, 1, &zopt_sometimes),
ZTI_INIT(ztest_vdev_LUN_growth, 1, &zopt_rarely),
ZTI_INIT(ztest_vdev_add_remove, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_vdev_class_add, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_vdev_aux_add_remove, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_device_removal, 1, &zopt_sometimes),
ZTI_INIT(ztest_spa_checkpoint_create_discard, 1, &zopt_rarely),
ZTI_INIT(ztest_initialize, 1, &zopt_sometimes),
ZTI_INIT(ztest_trim, 1, &zopt_sometimes),
ZTI_INIT(ztest_fletcher, 1, &zopt_rarely),
ZTI_INIT(ztest_fletcher_incr, 1, &zopt_rarely),
ZTI_INIT(ztest_verify_dnode_bt, 1, &zopt_sometimes),
};
#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
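/*
 * Scheduling note (informational): the zi_interval values above come from
 * the zopt_* constants, so an entry initialized with &zopt_sometimes, for
 * example, becomes eligible roughly every 10 seconds, and each time it runs
 * its zi_func is invoked zi_iters times.  The zo_vdevtime entries are
 * computed in process_options() from the total run time.
 */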
/*
* The following struct is used to hold a list of uncalled commit callbacks.
* The callbacks are ordered by txg number.
*/
typedef struct ztest_cb_list {
kmutex_t zcl_callbacks_lock;
list_t zcl_callbacks;
} ztest_cb_list_t;
/*
* Stuff we need to share writably between parent and child.
*/
typedef struct ztest_shared {
boolean_t zs_do_init;
hrtime_t zs_proc_start;
hrtime_t zs_proc_stop;
hrtime_t zs_thread_start;
hrtime_t zs_thread_stop;
hrtime_t zs_thread_kill;
uint64_t zs_enospc_count;
uint64_t zs_vdev_next_leaf;
uint64_t zs_vdev_aux;
uint64_t zs_alloc;
uint64_t zs_space;
uint64_t zs_splits;
uint64_t zs_mirrors;
uint64_t zs_metaslab_sz;
uint64_t zs_metaslab_df_alloc_threshold;
uint64_t zs_guid;
} ztest_shared_t;
#define ID_PARALLEL -1ULL
static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;
static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;
static kmutex_t ztest_vdev_lock;
static boolean_t ztest_device_removal_active = B_FALSE;
static boolean_t ztest_pool_scrubbed = B_FALSE;
static kmutex_t ztest_checkpoint_lock;
/*
* The ztest_name_lock protects the pool and dataset namespace used by
* the individual tests. To modify the namespace, consumers must grab
* this lock as writer. Grabbing the lock as reader will ensure that the
* namespace does not change while the lock is held.
*/
static pthread_rwlock_t ztest_name_lock;
static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;
/* Global commit callback list */
static ztest_cb_list_t zcl;
/* Commit cb delay */
static uint64_t zc_min_txg_delay = UINT64_MAX;
static int zc_cb_counter = 0;
/*
* Minimum number of commit callbacks that need to be registered for us to check
* whether the minimum txg delay is acceptable.
*/
#define ZTEST_COMMIT_CB_MIN_REG 100
/*
* If a number of txgs equal to this threshold have been created after a commit
* callback has been registered but not called, then we assume there is an
* implementation bug.
*/
#define ZTEST_COMMIT_CB_THRESH (TXG_CONCURRENT_STATES + 1000)
enum ztest_object {
ZTEST_META_DNODE = 0,
ZTEST_DIROBJ,
ZTEST_OBJECTS
};
static void usage(boolean_t) __NORETURN;
static int ztest_scrub_impl(spa_t *spa);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
dump_debug_buffer(void)
{
ssize_t ret __attribute__((unused));
if (!ztest_opts.zo_dump_dbgmsg)
return;
/*
* We use write() instead of printf() so that this function
* is safe to call from a signal handler.
*/
ret = write(STDOUT_FILENO, "\n", 1);
zfs_dbgmsg_print("ztest");
}
#define BACKTRACE_SZ 100
static void sig_handler(int signo)
{
struct sigaction action;
#if (__GLIBC__ && !__UCLIBC__) /* backtrace() is a GNU extension */
int nptrs;
void *buffer[BACKTRACE_SZ];
nptrs = backtrace(buffer, BACKTRACE_SZ);
backtrace_symbols_fd(buffer, nptrs, STDERR_FILENO);
#endif
dump_debug_buffer();
/*
* Restore default action and re-raise signal so SIGSEGV and
* SIGABRT can trigger a core dump.
*/
action.sa_handler = SIG_DFL;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
(void) sigaction(signo, &action, NULL);
raise(signo);
}
#define FATAL_MSG_SZ 1024
char *fatal_msg;
-static void
+static __attribute__((noreturn)) __attribute__((format(printf, 2, 3))) void
fatal(int do_perror, char *message, ...)
{
va_list args;
int save_errno = errno;
char *buf;
(void) fflush(stdout);
buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL);
va_start(args, message);
(void) sprintf(buf, "ztest: ");
/* LINTED */
(void) vsprintf(buf + strlen(buf), message, args);
va_end(args);
if (do_perror) {
(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
": %s", strerror(save_errno));
}
(void) fprintf(stderr, "%s\n", buf);
fatal_msg = buf; /* to ease debugging */
if (ztest_dump_core)
abort();
else
dump_debug_buffer();
exit(3);
}
static int
str2shift(const char *buf)
{
const char *ends = "BKMGTPEZ";
int i;
if (buf[0] == '\0')
return (0);
for (i = 0; i < strlen(ends); i++) {
if (toupper(buf[0]) == ends[i])
break;
}
if (i == strlen(ends)) {
(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
buf);
usage(B_FALSE);
}
if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
return (10*i);
}
(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
usage(B_FALSE);
- /* NOTREACHED */
}
static uint64_t
nicenumtoull(const char *buf)
{
char *end;
uint64_t val;
val = strtoull(buf, &end, 0);
if (end == buf) {
(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
usage(B_FALSE);
} else if (end[0] == '.') {
double fval = strtod(buf, &end);
fval *= pow(2, str2shift(end));
/*
* UINT64_MAX is not exactly representable as a double.
* The closest representation is UINT64_MAX + 1, so we
* use a >= comparison instead of > for the bounds check.
*/
if (fval >= (double)UINT64_MAX) {
(void) fprintf(stderr, "ztest: value too large: %s\n",
buf);
usage(B_FALSE);
}
val = (uint64_t)fval;
} else {
int shift = str2shift(end);
if (shift >= 64 || (val << shift) >> shift != val) {
(void) fprintf(stderr, "ztest: value too large: %s\n",
buf);
usage(B_FALSE);
}
val <<= shift;
}
return (val);
}
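/*
 * Worked examples for the parser above (values follow directly from
 * str2shift()'s power-of-two suffixes):
 *
 *	nicenumtoull("300")  == 300
 *	nicenumtoull("64K")  == 64ULL << 10
 *	nicenumtoull("1.5G") == (uint64_t)(1.5 * (1ULL << 30))
 */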
typedef struct ztest_option {
const char short_opt;
const char *long_opt;
const char *long_opt_param;
const char *comment;
unsigned int default_int;
char *default_str;
} ztest_option_t;
/*
* The following option_table is used for generating the usage info as well as
* the long and short option information for calling getopt_long().
*/
static ztest_option_t option_table[] = {
{ 'v', "vdevs", "INTEGER", "Number of vdevs", DEFAULT_VDEV_COUNT,
NULL},
{ 's', "vdev-size", "INTEGER", "Size of each vdev",
NO_DEFAULT, DEFAULT_VDEV_SIZE_STR},
{ 'a', "alignment-shift", "INTEGER",
"Alignment shift; use 0 for random", DEFAULT_ASHIFT, NULL},
{ 'm', "mirror-copies", "INTEGER", "Number of mirror copies",
DEFAULT_MIRRORS, NULL},
{ 'r', "raid-disks", "INTEGER", "Number of raidz/draid disks",
DEFAULT_RAID_CHILDREN, NULL},
{ 'R', "raid-parity", "INTEGER", "Raid parity",
DEFAULT_RAID_PARITY, NULL},
{ 'K', "raid-kind", "raidz|draid|random", "Raid kind",
NO_DEFAULT, "random"},
{ 'D', "draid-data", "INTEGER", "Number of draid data drives",
DEFAULT_DRAID_DATA, NULL},
{ 'S', "draid-spares", "INTEGER", "Number of draid spares",
DEFAULT_DRAID_SPARES, NULL},
{ 'd', "datasets", "INTEGER", "Number of datasets",
DEFAULT_DATASETS_COUNT, NULL},
{ 't', "threads", "INTEGER", "Number of ztest threads",
DEFAULT_THREADS, NULL},
{ 'g', "gang-block-threshold", "INTEGER",
"Metaslab gang block threshold",
NO_DEFAULT, DEFAULT_FORCE_GANGING_STR},
{ 'i', "init-count", "INTEGER", "Number of times to initialize pool",
DEFAULT_INITS, NULL},
{ 'k', "kill-percentage", "INTEGER", "Kill percentage",
NO_DEFAULT, DEFAULT_KILLRATE_STR},
{ 'p', "pool-name", "STRING", "Pool name",
NO_DEFAULT, DEFAULT_POOL},
{ 'f', "vdev-file-directory", "PATH", "File directory for vdev files",
NO_DEFAULT, DEFAULT_VDEV_DIR},
{ 'M', "multi-host", NULL,
"Multi-host; simulate pool imported on remote host",
NO_DEFAULT, NULL},
{ 'E', "use-existing-pool", NULL,
"Use existing pool instead of creating new one", NO_DEFAULT, NULL},
{ 'T', "run-time", "INTEGER", "Total run time",
NO_DEFAULT, DEFAULT_RUN_TIME_STR},
{ 'P', "pass-time", "INTEGER", "Time per pass",
NO_DEFAULT, DEFAULT_PASS_TIME_STR},
{ 'F', "freeze-loops", "INTEGER", "Max loops in spa_freeze()",
DEFAULT_MAX_LOOPS, NULL},
{ 'B', "alt-ztest", "PATH", "Alternate ztest path",
NO_DEFAULT, NULL},
{ 'C', "vdev-class-state", "on|off|random", "vdev class state",
NO_DEFAULT, "random"},
{ 'o', "option", "\"OPTION=INTEGER\"",
"Set global variable to an unsigned 32-bit integer value",
NO_DEFAULT, NULL},
{ 'G', "dump-debug-msg", NULL,
"Dump zfs_dbgmsg buffer before exiting due to an error",
NO_DEFAULT, NULL},
{ 'V', "verbose", NULL,
"Verbose (use multiple times for ever more verbosity)",
NO_DEFAULT, NULL},
{ 'h', "help", NULL, "Show this help",
NO_DEFAULT, NULL},
{0, 0, 0, 0, 0, 0}
};
static struct option *long_opts = NULL;
static char *short_opts = NULL;
static void
init_options(void)
{
ASSERT3P(long_opts, ==, NULL);
ASSERT3P(short_opts, ==, NULL);
int count = sizeof (option_table) / sizeof (option_table[0]);
long_opts = umem_alloc(sizeof (struct option) * count, UMEM_NOFAIL);
short_opts = umem_alloc(sizeof (char) * 2 * count, UMEM_NOFAIL);
int short_opt_index = 0;
for (int i = 0; i < count; i++) {
long_opts[i].val = option_table[i].short_opt;
long_opts[i].name = option_table[i].long_opt;
long_opts[i].has_arg = option_table[i].long_opt_param != NULL
? required_argument : no_argument;
long_opts[i].flag = NULL;
short_opts[short_opt_index++] = option_table[i].short_opt;
if (option_table[i].long_opt_param != NULL) {
short_opts[short_opt_index++] = ':';
}
}
}
static void
fini_options(void)
{
int count = sizeof (option_table) / sizeof (option_table[0]);
umem_free(long_opts, sizeof (struct option) * count);
umem_free(short_opts, sizeof (char) * 2 * count);
long_opts = NULL;
short_opts = NULL;
}
static void
usage(boolean_t requested)
{
char option[80];
FILE *fp = requested ? stdout : stderr;
(void) fprintf(fp, "Usage: %s [OPTIONS...]\n", DEFAULT_POOL);
for (int i = 0; option_table[i].short_opt != 0; i++) {
if (option_table[i].long_opt_param != NULL) {
(void) sprintf(option, " -%c --%s=%s",
option_table[i].short_opt,
option_table[i].long_opt,
option_table[i].long_opt_param);
} else {
(void) sprintf(option, " -%c --%s",
option_table[i].short_opt,
option_table[i].long_opt);
}
(void) fprintf(fp, " %-40s%s", option,
option_table[i].comment);
if (option_table[i].long_opt_param != NULL) {
if (option_table[i].default_str != NULL) {
(void) fprintf(fp, " (default: %s)",
option_table[i].default_str);
} else if (option_table[i].default_int != NO_DEFAULT) {
(void) fprintf(fp, " (default: %u)",
option_table[i].default_int);
}
}
(void) fprintf(fp, "\n");
}
exit(requested ? 0 : 1);
}
static uint64_t
ztest_random(uint64_t range)
{
uint64_t r;
ASSERT3S(ztest_fd_rand, >=, 0);
if (range == 0)
return (0);
if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
- fatal(1, "short read from /dev/urandom");
+ fatal(B_TRUE, "short read from /dev/urandom");
return (r % range);
}
static void
ztest_parse_name_value(const char *input, ztest_shared_opts_t *zo)
{
char name[32];
char *value;
int state = ZTEST_VDEV_CLASS_RND;
(void) strlcpy(name, input, sizeof (name));
value = strchr(name, '=');
if (value == NULL) {
(void) fprintf(stderr, "missing value in property=value "
"'-C' argument (%s)\n", input);
usage(B_FALSE);
}
*(value) = '\0';
value++;
if (strcmp(value, "on") == 0) {
state = ZTEST_VDEV_CLASS_ON;
} else if (strcmp(value, "off") == 0) {
state = ZTEST_VDEV_CLASS_OFF;
} else if (strcmp(value, "random") == 0) {
state = ZTEST_VDEV_CLASS_RND;
} else {
(void) fprintf(stderr, "invalid property value '%s'\n", value);
usage(B_FALSE);
}
if (strcmp(name, "special") == 0) {
zo->zo_special_vdevs = state;
} else {
(void) fprintf(stderr, "invalid property name '%s'\n", name);
usage(B_FALSE);
}
if (zo->zo_verbose >= 3)
(void) printf("%s vdev state is '%s'\n", name, value);
}
static void
process_options(int argc, char **argv)
{
char *path;
ztest_shared_opts_t *zo = &ztest_opts;
int opt;
uint64_t value;
char altdir[MAXNAMELEN] = { 0 };
char raid_kind[8] = { "random" };
bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
init_options();
while ((opt = getopt_long(argc, argv, short_opts, long_opts,
NULL)) != EOF) {
value = 0;
switch (opt) {
case 'v':
case 's':
case 'a':
case 'm':
case 'r':
case 'R':
case 'D':
case 'S':
case 'd':
case 't':
case 'g':
case 'i':
case 'k':
case 'T':
case 'P':
case 'F':
value = nicenumtoull(optarg);
}
switch (opt) {
case 'v':
zo->zo_vdevs = value;
break;
case 's':
zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
break;
case 'a':
zo->zo_ashift = value;
break;
case 'm':
zo->zo_mirrors = value;
break;
case 'r':
zo->zo_raid_children = MAX(1, value);
break;
case 'R':
zo->zo_raid_parity = MIN(MAX(value, 1), 3);
break;
case 'K':
(void) strlcpy(raid_kind, optarg, sizeof (raid_kind));
break;
case 'D':
zo->zo_draid_data = MAX(1, value);
break;
case 'S':
zo->zo_draid_spares = MAX(1, value);
break;
case 'd':
zo->zo_datasets = MAX(1, value);
break;
case 't':
zo->zo_threads = MAX(1, value);
break;
case 'g':
zo->zo_metaslab_force_ganging =
MAX(SPA_MINBLOCKSIZE << 1, value);
break;
case 'i':
zo->zo_init = value;
break;
case 'k':
zo->zo_killrate = value;
break;
case 'p':
(void) strlcpy(zo->zo_pool, optarg,
sizeof (zo->zo_pool));
break;
case 'f':
path = realpath(optarg, NULL);
if (path == NULL) {
(void) fprintf(stderr, "error: %s: %s\n",
optarg, strerror(errno));
usage(B_FALSE);
} else {
(void) strlcpy(zo->zo_dir, path,
sizeof (zo->zo_dir));
free(path);
}
break;
case 'M':
zo->zo_mmp_test = 1;
break;
case 'V':
zo->zo_verbose++;
break;
case 'E':
zo->zo_init = 0;
break;
case 'T':
zo->zo_time = value;
break;
case 'P':
zo->zo_passtime = MAX(1, value);
break;
case 'F':
zo->zo_maxloops = MAX(1, value);
break;
case 'B':
(void) strlcpy(altdir, optarg, sizeof (altdir));
break;
case 'C':
ztest_parse_name_value(optarg, zo);
break;
case 'o':
if (zo->zo_gvars_count >= ZO_GVARS_MAX_COUNT) {
(void) fprintf(stderr,
"max global var count (%zu) exceeded\n",
ZO_GVARS_MAX_COUNT);
usage(B_FALSE);
}
char *v = zo->zo_gvars[zo->zo_gvars_count];
if (strlcpy(v, optarg, ZO_GVARS_MAX_ARGLEN) >=
ZO_GVARS_MAX_ARGLEN) {
(void) fprintf(stderr,
"global var option '%s' is too long\n",
optarg);
usage(B_FALSE);
}
zo->zo_gvars_count++;
break;
case 'G':
zo->zo_dump_dbgmsg = 1;
break;
case 'h':
usage(B_TRUE);
break;
case '?':
default:
usage(B_FALSE);
break;
}
}
fini_options();
/* When raid choice is 'random' add a draid pool 50% of the time */
if (strcmp(raid_kind, "random") == 0) {
(void) strlcpy(raid_kind, (ztest_random(2) == 0) ?
"draid" : "raidz", sizeof (raid_kind));
if (ztest_opts.zo_verbose >= 3)
(void) printf("choosing RAID type '%s'\n", raid_kind);
}
if (strcmp(raid_kind, "draid") == 0) {
uint64_t min_devsize;
/* With fewer than 16 disks use 256M, otherwise 128M is OK */
min_devsize = (ztest_opts.zo_raid_children < 16) ?
(256ULL << 20) : (128ULL << 20);
/* No top-level mirrors with dRAID for now */
zo->zo_mirrors = 0;
/* Use more appropriate defaults for dRAID */
if (zo->zo_vdevs == ztest_opts_defaults.zo_vdevs)
zo->zo_vdevs = 1;
if (zo->zo_raid_children ==
ztest_opts_defaults.zo_raid_children)
zo->zo_raid_children = 16;
if (zo->zo_ashift < 12)
zo->zo_ashift = 12;
if (zo->zo_vdev_size < min_devsize)
zo->zo_vdev_size = min_devsize;
if (zo->zo_draid_data + zo->zo_raid_parity >
zo->zo_raid_children - zo->zo_draid_spares) {
(void) fprintf(stderr, "error: too few draid "
"children (%d) for stripe width (%d)\n",
zo->zo_raid_children,
zo->zo_draid_data + zo->zo_raid_parity);
usage(B_FALSE);
}
(void) strlcpy(zo->zo_raid_type, VDEV_TYPE_DRAID,
sizeof (zo->zo_raid_type));
} else /* using raidz */ {
ASSERT0(strcmp(raid_kind, "raidz"));
zo->zo_raid_parity = MIN(zo->zo_raid_parity,
zo->zo_raid_children - 1);
}
zo->zo_vdevtime =
(zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
UINT64_MAX >> 2);
if (strlen(altdir) > 0) {
char *cmd;
char *realaltdir;
char *bin;
char *ztest;
char *isa;
int isalen;
cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
VERIFY3P(NULL, !=, realpath(getexecname(), cmd));
if (0 != access(altdir, F_OK)) {
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "invalid alternate ztest path: %s",
altdir);
}
VERIFY3P(NULL, !=, realpath(altdir, realaltdir));
/*
* 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
* We want to extract <isa> to determine if we should use
* 32 or 64 bit binaries.
*/
bin = strstr(cmd, "/usr/bin/");
ztest = strstr(bin, "/ztest");
isa = bin + 9;
isalen = ztest - isa;
(void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
"%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
(void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
"%s/usr/lib/%.*s", realaltdir, isalen, isa);
if (0 != access(zo->zo_alt_ztest, X_OK)) {
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "invalid alternate ztest: %s",
zo->zo_alt_ztest);
} else if (0 != access(zo->zo_alt_libpath, X_OK)) {
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "invalid alternate lib directory %s",
zo->zo_alt_libpath);
}
umem_free(cmd, MAXPATHLEN);
umem_free(realaltdir, MAXPATHLEN);
}
}
static void
ztest_kill(ztest_shared_t *zs)
{
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
/*
* Before we kill off ztest, make sure that the config is updated.
* See comment above spa_write_cachefile().
*/
mutex_enter(&spa_namespace_lock);
spa_write_cachefile(ztest_spa, B_FALSE, B_FALSE);
mutex_exit(&spa_namespace_lock);
(void) kill(getpid(), SIGKILL);
}
/* ARGSUSED */
static void
ztest_record_enospc(const char *s)
{
ztest_shared->zs_enospc_count++;
}
static uint64_t
ztest_get_ashift(void)
{
if (ztest_opts.zo_ashift == 0)
return (SPA_MINBLOCKSHIFT + ztest_random(5));
return (ztest_opts.zo_ashift);
}
static boolean_t
ztest_is_draid_spare(const char *name)
{
uint64_t spare_id = 0, parity = 0, vdev_id = 0;
- if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
- (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
- (u_longlong_t *)&spare_id) == 3) {
+ if (sscanf(name, VDEV_TYPE_DRAID "%"PRIu64"-%"PRIu64"-%"PRIu64"",
+ &parity, &vdev_id, &spare_id) == 3) {
return (B_TRUE);
}
return (B_FALSE);
}
static nvlist_t *
make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
{
char *pathbuf;
uint64_t vdev;
nvlist_t *file;
boolean_t draid_spare = B_FALSE;
pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
if (ashift == 0)
ashift = ztest_get_ashift();
if (path == NULL) {
path = pathbuf;
if (aux != NULL) {
vdev = ztest_shared->zs_vdev_aux;
(void) snprintf(path, MAXPATHLEN,
ztest_aux_template, ztest_opts.zo_dir,
pool == NULL ? ztest_opts.zo_pool : pool,
aux, vdev);
} else {
vdev = ztest_shared->zs_vdev_next_leaf++;
(void) snprintf(path, MAXPATHLEN,
ztest_dev_template, ztest_opts.zo_dir,
pool == NULL ? ztest_opts.zo_pool : pool, vdev);
}
} else {
draid_spare = ztest_is_draid_spare(path);
}
if (size != 0 && !draid_spare) {
int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
if (fd == -1)
- fatal(1, "can't open %s", path);
+ fatal(B_TRUE, "can't open %s", path);
if (ftruncate(fd, size) != 0)
- fatal(1, "can't ftruncate %s", path);
+ fatal(B_TRUE, "can't ftruncate %s", path);
(void) close(fd);
}
file = fnvlist_alloc();
fnvlist_add_string(file, ZPOOL_CONFIG_TYPE,
draid_spare ? VDEV_TYPE_DRAID_SPARE : VDEV_TYPE_FILE);
fnvlist_add_string(file, ZPOOL_CONFIG_PATH, path);
fnvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift);
umem_free(pathbuf, MAXPATHLEN);
return (file);
}
static nvlist_t *
make_vdev_raid(char *path, char *aux, char *pool, size_t size,
uint64_t ashift, int r)
{
nvlist_t *raid, **child;
int c;
if (r < 2)
return (make_vdev_file(path, aux, pool, size, ashift));
child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < r; c++)
child[c] = make_vdev_file(path, aux, pool, size, ashift);
raid = fnvlist_alloc();
fnvlist_add_string(raid, ZPOOL_CONFIG_TYPE,
ztest_opts.zo_raid_type);
fnvlist_add_uint64(raid, ZPOOL_CONFIG_NPARITY,
ztest_opts.zo_raid_parity);
fnvlist_add_nvlist_array(raid, ZPOOL_CONFIG_CHILDREN, child, r);
if (strcmp(ztest_opts.zo_raid_type, VDEV_TYPE_DRAID) == 0) {
uint64_t ndata = ztest_opts.zo_draid_data;
uint64_t nparity = ztest_opts.zo_raid_parity;
uint64_t nspares = ztest_opts.zo_draid_spares;
uint64_t children = ztest_opts.zo_raid_children;
uint64_t ngroups = 1;
/*
* Calculate the minimum number of groups required to fill a
* slice: ngroups * (data + parity) is the LCM of the stripe
* width (data + parity) and the number of data drives
* (children - spares).
*/
while (ngroups * (ndata + nparity) % (children - nspares) != 0)
ngroups++;
/* Store the basic dRAID configuration. */
fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NDATA, ndata);
fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NSPARES, nspares);
fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NGROUPS, ngroups);
}
for (c = 0; c < r; c++)
fnvlist_free(child[c]);
umem_free(child, r * sizeof (nvlist_t *));
return (raid);
}
static nvlist_t *
make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
uint64_t ashift, int r, int m)
{
nvlist_t *mirror, **child;
int c;
if (m < 1)
return (make_vdev_raid(path, aux, pool, size, ashift, r));
child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < m; c++)
child[c] = make_vdev_raid(path, aux, pool, size, ashift, r);
mirror = fnvlist_alloc();
fnvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, VDEV_TYPE_MIRROR);
fnvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN, child, m);
for (c = 0; c < m; c++)
fnvlist_free(child[c]);
umem_free(child, m * sizeof (nvlist_t *));
return (mirror);
}
static nvlist_t *
make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift,
const char *class, int r, int m, int t)
{
nvlist_t *root, **child;
int c;
boolean_t log;
ASSERT3S(t, >, 0);
log = (class != NULL && strcmp(class, "log") == 0);
child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < t; c++) {
child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
r, m);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG, log);
if (class != NULL && class[0] != '\0') {
ASSERT(m > 1 || log); /* expecting a mirror */
fnvlist_add_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, class);
}
}
root = fnvlist_alloc();
fnvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
fnvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
child, t);
for (c = 0; c < t; c++)
fnvlist_free(child[c]);
umem_free(child, t * sizeof (nvlist_t *));
return (root);
}
/*
* Find a random spa version. Returns back a random spa version in the
* range [initial_version, SPA_VERSION_FEATURES].
*/
static uint64_t
ztest_random_spa_version(uint64_t initial_version)
{
uint64_t version = initial_version;
if (version <= SPA_VERSION_BEFORE_FEATURES) {
version = version +
ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
}
if (version > SPA_VERSION_BEFORE_FEATURES)
version = SPA_VERSION_FEATURES;
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
return (version);
}
static int
ztest_random_blocksize(void)
{
ASSERT3U(ztest_spa->spa_max_ashift, !=, 0);
/*
* Choose a block size >= the ashift.
* If the SPA supports new MAXBLOCKSIZE, test up to 1MB blocks.
*/
int maxbs = SPA_OLD_MAXBLOCKSHIFT;
if (spa_maxblocksize(ztest_spa) == SPA_MAXBLOCKSIZE)
maxbs = 20;
uint64_t block_shift =
ztest_random(maxbs - ztest_spa->spa_max_ashift + 1);
return (1 << (SPA_MINBLOCKSHIFT + block_shift));
}
static int
ztest_random_dnodesize(void)
{
int slots;
int max_slots = spa_maxdnodesize(ztest_spa) >> DNODE_SHIFT;
if (max_slots == DNODE_MIN_SLOTS)
return (DNODE_MIN_SIZE);
/*
* Weight the random distribution more heavily toward smaller
* dnode sizes since that is more likely to reflect real-world
* usage.
*/
ASSERT3U(max_slots, >, 4);
switch (ztest_random(10)) {
case 0:
slots = 5 + ztest_random(max_slots - 4);
break;
case 1 ... 4:
slots = 2 + ztest_random(3);
break;
default:
slots = 1;
break;
}
return (slots << DNODE_SHIFT);
}
static int
ztest_random_ibshift(void)
{
return (DN_MIN_INDBLKSHIFT +
ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}
static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
uint64_t top;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *tvd;
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
do {
top = ztest_random(rvd->vdev_children);
tvd = rvd->vdev_child[top];
} while (!vdev_is_concrete(tvd) || (tvd->vdev_islog && !log_ok) ||
tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
return (top);
}
static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
uint64_t value;
do {
value = zfs_prop_random_value(prop, ztest_random(-1ULL));
} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
return (value);
}
static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
boolean_t inherit)
{
const char *propname = zfs_prop_to_name(prop);
const char *valname;
char *setpoint;
uint64_t curval;
int error;
error = dsl_prop_set_int(osname, propname,
(inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (error);
}
ASSERT0(error);
setpoint = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));
if (ztest_opts.zo_verbose >= 6) {
int err;
err = zfs_prop_index_to_string(prop, curval, &valname);
if (err)
(void) printf("%s %s = %llu at '%s'\n", osname,
propname, (unsigned long long)curval, setpoint);
else
(void) printf("%s %s = %s at '%s'\n",
osname, propname, valname, setpoint);
}
umem_free(setpoint, MAXPATHLEN);
return (error);
}
static int
ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
{
spa_t *spa = ztest_spa;
nvlist_t *props = NULL;
int error;
props = fnvlist_alloc();
fnvlist_add_uint64(props, zpool_prop_to_name(prop), value);
error = spa_prop_set(spa, props);
fnvlist_free(props);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (error);
}
ASSERT0(error);
return (error);
}
static int
ztest_dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
int err;
char *cp = NULL;
char ddname[ZFS_MAX_DATASET_NAME_LEN];
strcpy(ddname, name);
cp = strchr(ddname, '@');
if (cp != NULL)
*cp = '\0';
err = dmu_objset_own(name, type, readonly, decrypt, tag, osp);
while (decrypt && err == EACCES) {
dsl_crypto_params_t *dcp;
nvlist_t *crypto_args = fnvlist_alloc();
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)ztest_wkeydata, WRAPPING_KEY_LEN);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, NULL,
crypto_args, &dcp));
err = spa_keystore_load_wkey(ddname, dcp, B_FALSE);
/*
* Note: if there was an error loading, the wkey was not
* consumed, and needs to be freed.
*/
dsl_crypto_params_free(dcp, (err != 0));
fnvlist_free(crypto_args);
if (err == EINVAL) {
/*
* We couldn't load a key for this dataset so try
* the parent. This loop will eventually hit the
* encryption root since ztest only makes clones
* as children of their origin datasets.
*/
cp = strrchr(ddname, '/');
if (cp == NULL)
return (err);
*cp = '\0';
err = EACCES;
continue;
} else if (err != 0) {
break;
}
err = dmu_objset_own(name, type, readonly, decrypt, tag, osp);
break;
}
return (err);
}
static void
ztest_rll_init(rll_t *rll)
{
rll->rll_writer = NULL;
rll->rll_readers = 0;
mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL);
}
static void
ztest_rll_destroy(rll_t *rll)
{
ASSERT3P(rll->rll_writer, ==, NULL);
ASSERT0(rll->rll_readers);
mutex_destroy(&rll->rll_lock);
cv_destroy(&rll->rll_cv);
}
static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
mutex_enter(&rll->rll_lock);
if (type == RL_READER) {
while (rll->rll_writer != NULL)
(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_readers++;
} else {
while (rll->rll_writer != NULL || rll->rll_readers)
(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_writer = curthread;
}
mutex_exit(&rll->rll_lock);
}
static void
ztest_rll_unlock(rll_t *rll)
{
mutex_enter(&rll->rll_lock);
if (rll->rll_writer) {
ASSERT0(rll->rll_readers);
rll->rll_writer = NULL;
} else {
ASSERT3S(rll->rll_readers, >, 0);
ASSERT3P(rll->rll_writer, ==, NULL);
rll->rll_readers--;
}
if (rll->rll_writer == NULL && rll->rll_readers == 0)
cv_broadcast(&rll->rll_cv);
mutex_exit(&rll->rll_lock);
}
static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
ztest_rll_lock(rll, type);
}
static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
ztest_rll_unlock(rll);
}
static rl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
uint64_t size, rl_type_t type)
{
uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
rl_t *rl;
rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
rl->rl_object = object;
rl->rl_offset = offset;
rl->rl_size = size;
rl->rl_lock = rll;
ztest_rll_lock(rll, type);
return (rl);
}
static void
ztest_range_unlock(rl_t *rl)
{
rll_t *rll = rl->rl_lock;
ztest_rll_unlock(rll);
umem_free(rl, sizeof (*rl));
}
static void
ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
{
zd->zd_os = os;
zd->zd_zilog = dmu_objset_zil(os);
zd->zd_shared = szd;
dmu_objset_name(os, zd->zd_name);
int l;
if (zd->zd_shared != NULL)
zd->zd_shared->zd_seq = 0;
VERIFY0(pthread_rwlock_init(&zd->zd_zilog_lock, NULL));
mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL);
for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_init(&zd->zd_object_lock[l]);
for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
ztest_rll_init(&zd->zd_range_lock[l]);
}
static void
ztest_zd_fini(ztest_ds_t *zd)
{
int l;
mutex_destroy(&zd->zd_dirobj_lock);
(void) pthread_rwlock_destroy(&zd->zd_zilog_lock);
for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_destroy(&zd->zd_object_lock[l]);
for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
ztest_rll_destroy(&zd->zd_range_lock[l]);
}
#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
uint64_t txg;
int error;
/*
* Attempt to assign tx to some transaction group.
*/
error = dmu_tx_assign(tx, txg_how);
if (error) {
if (error == ERESTART) {
ASSERT3U(txg_how, ==, TXG_NOWAIT);
dmu_tx_wait(tx);
} else {
ASSERT3U(error, ==, ENOSPC);
ztest_record_enospc(tag);
}
dmu_tx_abort(tx);
return (0);
}
txg = dmu_tx_get_txg(tx);
ASSERT3U(txg, !=, 0);
return (txg);
}
static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
uint64_t crtxg)
{
bt->bt_magic = BT_MAGIC;
bt->bt_objset = dmu_objset_id(os);
bt->bt_object = object;
bt->bt_dnodesize = dnodesize;
bt->bt_offset = offset;
bt->bt_gen = gen;
bt->bt_txg = txg;
bt->bt_crtxg = crtxg;
}
static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
uint64_t crtxg)
{
ASSERT3U(bt->bt_magic, ==, BT_MAGIC);
ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os));
ASSERT3U(bt->bt_object, ==, object);
ASSERT3U(bt->bt_dnodesize, ==, dnodesize);
ASSERT3U(bt->bt_offset, ==, offset);
ASSERT3U(bt->bt_gen, <=, gen);
ASSERT3U(bt->bt_txg, <=, txg);
ASSERT3U(bt->bt_crtxg, ==, crtxg);
}
static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
dmu_object_info_t doi;
ztest_block_tag_t *bt;
dmu_object_info_from_db(db, &doi);
ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
return (bt);
}
/*
* Generate a token to fill up unused bonus buffer space. Try to make
* it unique to the object, generation, and offset to verify that data
* is not getting overwritten by data from other dnodes.
*/
#define ZTEST_BONUS_FILL_TOKEN(obj, ds, gen, offset) \
(((ds) << 48) | ((gen) << 32) | ((obj) << 8) | (offset))
/*
* Fill up the unused bonus buffer region before the block tag with a
* verifiable pattern. Filling the whole bonus area with non-zero data
* helps ensure that all dnode traversal code properly skips the
* interior regions of large dnodes.
*/
static void
ztest_fill_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
objset_t *os, uint64_t gen)
{
uint64_t *bonusp;
ASSERT(IS_P2ALIGNED((char *)end - (char *)db->db_data, 8));
for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
gen, bonusp - (uint64_t *)db->db_data);
*bonusp = token;
}
}
/*
* Verify that the unused area of a bonus buffer is filled with the
* expected tokens.
*/
static void
ztest_verify_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
objset_t *os, uint64_t gen)
{
uint64_t *bonusp;
for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
gen, bonusp - (uint64_t *)db->db_data);
VERIFY3U(*bonusp, ==, token);
}
}
/*
* ZIL logging ops
*/
#define lrz_type lr_mode
#define lrz_blocksize lr_uid
#define lrz_ibshift lr_gid
#define lrz_bonustype lr_rdev
#define lrz_dnodesize lr_crtime[1]
static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
char *name = (void *)(lr + 1); /* name follows lr */
size_t namesize = strlen(name) + 1;
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
sizeof (*lr) + namesize - sizeof (lr_t));
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
char *name = (void *)(lr + 1); /* name follows lr */
size_t namesize = strlen(name) + 1;
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
sizeof (*lr) + namesize - sizeof (lr_t));
itx->itx_oid = object;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
itx_t *itx;
itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
if (zil_replaying(zd->zd_zilog, tx))
return;
if (lr->lr_length > zil_max_log_data(zd->zd_zilog))
write_state = WR_INDIRECT;
itx = zil_itx_create(TX_WRITE,
sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
if (write_state == WR_COPIED &&
dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
write_state = WR_NEED_COPY;
}
itx->itx_private = zd;
itx->itx_wr_state = write_state;
itx->itx_sync = (ztest_random(8) == 0);
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
sizeof (*lr) - sizeof (lr_t));
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
sizeof (*lr) - sizeof (lr_t));
itx->itx_sync = B_FALSE;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
sizeof (*lr) - sizeof (lr_t));
itx->itx_sync = B_FALSE;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
/*
* ZIL replay ops
*/
static int
ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_create_t *lr = arg2;
char *name = (void *)(lr + 1); /* name follows lr */
objset_t *os = zd->zd_os;
ztest_block_tag_t *bbt;
dmu_buf_t *db;
dmu_tx_t *tx;
uint64_t txg;
int error = 0;
int bonuslen;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ASSERT3U(lr->lr_doid, ==, ZTEST_DIROBJ);
ASSERT3S(name[0], !=, '\0');
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
} else {
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
}
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0)
return (ENOSPC);
ASSERT3U(dmu_objset_zil(os)->zl_replay, ==, !!lr->lr_foid);
bonuslen = DN_BONUS_SIZE(lr->lrz_dnodesize);
if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
if (lr->lr_foid == 0) {
lr->lr_foid = zap_create_dnsize(os,
lr->lrz_type, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
} else {
error = zap_create_claim_dnsize(os, lr->lr_foid,
lr->lrz_type, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
}
} else {
if (lr->lr_foid == 0) {
lr->lr_foid = dmu_object_alloc_dnsize(os,
lr->lrz_type, 0, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
} else {
error = dmu_object_claim_dnsize(os, lr->lr_foid,
lr->lrz_type, 0, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
}
}
if (error) {
ASSERT3U(error, ==, EEXIST);
ASSERT(zd->zd_zilog->zl_replay);
dmu_tx_commit(tx);
return (error);
}
ASSERT3U(lr->lr_foid, !=, 0);
if (lr->lrz_type != DMU_OT_ZAP_OTHER)
VERIFY0(dmu_object_set_blocksize(os, lr->lr_foid,
lr->lrz_blocksize, lr->lrz_ibshift, tx));
VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
bbt = ztest_bt_bonus(db);
dmu_buf_will_dirty(db, tx);
ztest_bt_generate(bbt, os, lr->lr_foid, lr->lrz_dnodesize, -1ULL,
lr->lr_gen, txg, txg);
ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, lr->lr_gen);
dmu_buf_rele(db, FTAG);
VERIFY0(zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
&lr->lr_foid, tx));
(void) ztest_log_create(zd, tx, lr);
dmu_tx_commit(tx);
return (0);
}
static int
ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_remove_t *lr = arg2;
char *name = (void *)(lr + 1); /* name follows lr */
objset_t *os = zd->zd_os;
dmu_object_info_t doi;
dmu_tx_t *tx;
uint64_t object, txg;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ASSERT3U(lr->lr_doid, ==, ZTEST_DIROBJ);
ASSERT3S(name[0], !=, '\0');
VERIFY0(
zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
ASSERT3U(object, !=, 0);
ztest_object_lock(zd, object, RL_WRITER);
VERIFY0(dmu_object_info(os, object, &doi));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
ztest_object_unlock(zd, object);
return (ENOSPC);
}
if (doi.doi_type == DMU_OT_ZAP_OTHER) {
VERIFY0(zap_destroy(os, object, tx));
} else {
VERIFY0(dmu_object_free(os, object, tx));
}
VERIFY0(zap_remove(os, lr->lr_doid, name, tx));
(void) ztest_log_remove(zd, tx, lr, object);
dmu_tx_commit(tx);
ztest_object_unlock(zd, object);
return (0);
}
static int
ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_write_t *lr = arg2;
objset_t *os = zd->zd_os;
void *data = lr + 1; /* data follows lr */
uint64_t offset, length;
ztest_block_tag_t *bt = data;
ztest_block_tag_t *bbt;
uint64_t gen, txg, lrtxg, crtxg;
dmu_object_info_t doi;
dmu_tx_t *tx;
dmu_buf_t *db;
arc_buf_t *abuf = NULL;
rl_t *rl;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
}
if (bt->bt_magic == BSWAP_64(BT_MAGIC))
byteswap_uint64_array(bt, sizeof (*bt));
if (bt->bt_magic != BT_MAGIC)
bt = NULL;
ztest_object_lock(zd, lr->lr_foid, RL_READER);
rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
dmu_object_info_from_db(db, &doi);
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
gen = bbt->bt_gen;
crtxg = bbt->bt_crtxg;
lrtxg = lr->lr_common.lrc_txg;
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
P2PHASE(offset, length) == 0)
abuf = dmu_request_arcbuf(db, length);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
if (abuf != NULL)
dmu_return_arcbuf(abuf);
dmu_buf_rele(db, FTAG);
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
if (bt != NULL) {
/*
* Usually, verify the old data before writing new data --
* but not always, because we also want to verify correct
* behavior when the data was not recently read into cache.
*/
ASSERT0(offset % doi.doi_data_block_size);
if (ztest_random(4) != 0) {
int prefetch = ztest_random(2) ?
DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
ztest_block_tag_t rbt;
VERIFY(dmu_read(os, lr->lr_foid, offset,
sizeof (rbt), &rbt, prefetch) == 0);
if (rbt.bt_magic == BT_MAGIC) {
ztest_bt_verify(&rbt, os, lr->lr_foid, 0,
offset, gen, txg, crtxg);
}
}
/*
* Writes can appear to be newer than the bonus buffer because
* the ztest_get_data() callback does a dmu_read() of the
* open-context data, which may be different than the data
* as it was when the write was generated.
*/
if (zd->zd_zilog->zl_replay) {
ztest_bt_verify(bt, os, lr->lr_foid, 0, offset,
MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
bt->bt_crtxg);
}
/*
* Set the bt's gen/txg to the bonus buffer's gen/txg
* so that all of the usual ASSERTs will work.
*/
ztest_bt_generate(bt, os, lr->lr_foid, 0, offset, gen, txg,
crtxg);
}
if (abuf == NULL) {
dmu_write(os, lr->lr_foid, offset, length, data, tx);
} else {
bcopy(data, abuf->b_data, length);
dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx);
}
(void) ztest_log_write(zd, tx, lr);
dmu_buf_rele(db, FTAG);
dmu_tx_commit(tx);
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
static int
ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_truncate_t *lr = arg2;
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
rl_t *rl;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ztest_object_lock(zd, lr->lr_foid, RL_READER);
rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
RL_WRITER);
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
VERIFY0(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
lr->lr_length, tx));
(void) ztest_log_truncate(zd, tx, lr);
dmu_tx_commit(tx);
ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
static int
ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_setattr_t *lr = arg2;
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
dmu_buf_t *db;
ztest_block_tag_t *bbt;
uint64_t txg, lrtxg, crtxg, dnodesize;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, lr->lr_foid);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
crtxg = bbt->bt_crtxg;
lrtxg = lr->lr_common.lrc_txg;
dnodesize = bbt->bt_dnodesize;
if (zd->zd_zilog->zl_replay) {
ASSERT3U(lr->lr_size, !=, 0);
ASSERT3U(lr->lr_mode, !=, 0);
ASSERT3U(lrtxg, !=, 0);
} else {
/*
* Randomly change the size and increment the generation.
*/
lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
sizeof (*bbt);
lr->lr_mode = bbt->bt_gen + 1;
ASSERT0(lrtxg);
}
/*
* Verify that the current bonus buffer is not newer than our txg.
*/
ztest_bt_verify(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
MAX(txg, lrtxg), crtxg);
dmu_buf_will_dirty(db, tx);
ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
ASSERT3U(lr->lr_size, <=, db->db_size);
VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
bbt = ztest_bt_bonus(db);
ztest_bt_generate(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
txg, crtxg);
ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, bbt->bt_gen);
dmu_buf_rele(db, FTAG);
(void) ztest_log_setattr(zd, tx, lr);
dmu_tx_commit(tx);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
NULL, /* 0 no such transaction type */
ztest_replay_create, /* TX_CREATE */
NULL, /* TX_MKDIR */
NULL, /* TX_MKXATTR */
NULL, /* TX_SYMLINK */
ztest_replay_remove, /* TX_REMOVE */
NULL, /* TX_RMDIR */
NULL, /* TX_LINK */
NULL, /* TX_RENAME */
ztest_replay_write, /* TX_WRITE */
ztest_replay_truncate, /* TX_TRUNCATE */
ztest_replay_setattr, /* TX_SETATTR */
NULL, /* TX_ACL */
NULL, /* TX_CREATE_ACL */
NULL, /* TX_CREATE_ATTR */
NULL, /* TX_CREATE_ACL_ATTR */
NULL, /* TX_MKDIR_ACL */
NULL, /* TX_MKDIR_ATTR */
NULL, /* TX_MKDIR_ACL_ATTR */
NULL, /* TX_WRITE2 */
};
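/*
 * A rough sketch (illustrative only, not wired into any test) of the
 * dispatch that zil_replay() performs with the vector above: each log
 * record is handed to the handler indexed by its transaction type, and
 * NULL slots are simply skipped. The real logic lives inside the ZIL
 * code and also byteswaps the common header when needed.
 */
static int
ztest_replay_dispatch_sketch(ztest_ds_t *zd, lr_t *lr, boolean_t byteswap)
{
	uint64_t txtype = lr->lrc_txtype;

	if (txtype >= TX_MAX_TYPE || ztest_replay_vector[txtype] == NULL)
		return (0);

	return (ztest_replay_vector[txtype](zd, lr, byteswap));
}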
/*
* ZIL get_data callbacks
*/
/* ARGSUSED */
static void
ztest_get_done(zgd_t *zgd, int error)
{
ztest_ds_t *zd = zgd->zgd_private;
uint64_t object = ((rl_t *)zgd->zgd_lr)->rl_object;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
ztest_range_unlock((rl_t *)zgd->zgd_lr);
ztest_object_unlock(zd, object);
umem_free(zgd, sizeof (*zgd));
}
static int
ztest_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
ztest_ds_t *zd = arg;
objset_t *os = zd->zd_os;
uint64_t object = lr->lr_foid;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
uint64_t txg = lr->lr_common.lrc_txg;
uint64_t crtxg;
dmu_object_info_t doi;
dmu_buf_t *db;
zgd_t *zgd;
int error;
ASSERT3P(lwb, !=, NULL);
ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
ztest_object_lock(zd, object, RL_READER);
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error) {
ztest_object_unlock(zd, object);
return (error);
}
crtxg = ztest_bt_bonus(db)->bt_crtxg;
if (crtxg == 0 || crtxg > txg) {
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, object);
return (ENOENT);
}
dmu_object_info_from_db(db, &doi);
dmu_buf_rele(db, FTAG);
db = NULL;
zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
zgd->zgd_lwb = lwb;
zgd->zgd_private = zd;
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
object, offset, size, RL_READER);
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
ASSERT0(error);
} else {
size = doi.doi_data_block_size;
if (ISP2(size)) {
offset = P2ALIGN(offset, size);
} else {
ASSERT3U(offset, <, size);
offset = 0;
}
zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
object, offset, size, RL_READER);
error = dmu_buf_hold(os, object, offset, zgd, &db,
DMU_READ_NO_PREFETCH);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT3U(db->db_offset, ==, offset);
ASSERT3U(db->db_size, ==, size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
ztest_get_done, zgd);
if (error == 0)
return (0);
}
}
ztest_get_done(zgd, error);
return (error);
}
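/*
 * Worked example of the indirect-write alignment above, assuming the usual
 * ISP2()/P2ALIGN() semantics: with a power-of-2 block size the offset is
 * rounded down to a block boundary (e.g. size = 131072 and offset = 200000
 * give P2ALIGN(200000, 131072) = 131072), while a non-power-of-2 block size
 * implies the object is a single block, so offset 0 is used.
 */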
static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
char *lr;
size_t namesize = name ? strlen(name) + 1 : 0;
lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
if (name)
bcopy(name, lr + lrsize, namesize);
return (lr);
}
static void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
size_t namesize = name ? strlen(name) + 1 : 0;
umem_free(lr, lrsize + namesize);
}
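/*
 * Minimal usage sketch of the two helpers above (illustrative only): the
 * optional name is stored immediately after the fixed-size record, which is
 * why the replay handlers recover it with (void *)(lr + 1). The object name
 * here is hypothetical.
 */
static void
ztest_lr_name_layout_sketch(void)
{
	lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), "example_name");
	char *name = (void *)(lr + 1);

	ASSERT0(strcmp(name, "example_name"));
	ztest_lr_free(lr, sizeof (*lr), "example_name");
}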
/*
* Lookup a bunch of objects. Returns the number of objects not found.
*/
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
od->od_object = 0;
error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
sizeof (uint64_t), 1, &od->od_object);
if (error) {
ASSERT3S(error, ==, ENOENT);
ASSERT0(od->od_object);
missing++;
} else {
dmu_buf_t *db;
ztest_block_tag_t *bbt;
dmu_object_info_t doi;
ASSERT3U(od->od_object, !=, 0);
ASSERT0(missing); /* there should be no gaps */
ztest_object_lock(zd, od->od_object, RL_READER);
VERIFY0(dmu_bonus_hold(zd->zd_os, od->od_object,
FTAG, &db));
dmu_object_info_from_db(db, &doi);
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
od->od_type = doi.doi_type;
od->od_blocksize = doi.doi_data_block_size;
od->od_gen = bbt->bt_gen;
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, od->od_object);
}
}
return (missing);
}
static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
if (missing) {
od->od_object = 0;
missing++;
continue;
}
lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
lr->lr_doid = od->od_dir;
lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
lr->lrz_type = od->od_crtype;
lr->lrz_blocksize = od->od_crblocksize;
lr->lrz_ibshift = ztest_random_ibshift();
lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
lr->lrz_dnodesize = od->od_crdnodesize;
lr->lr_gen = od->od_crgen;
lr->lr_crtime[0] = time(NULL);
if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
ASSERT0(missing);
od->od_object = 0;
missing++;
} else {
od->od_object = lr->lr_foid;
od->od_type = od->od_crtype;
od->od_blocksize = od->od_crblocksize;
od->od_gen = od->od_crgen;
ASSERT3U(od->od_object, !=, 0);
}
ztest_lr_free(lr, sizeof (*lr), od->od_name);
}
return (missing);
}
static int
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
od += count - 1;
for (i = count - 1; i >= 0; i--, od--) {
if (missing) {
missing++;
continue;
}
/*
* No object was found.
*/
if (od->od_object == 0)
continue;
lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
lr->lr_doid = od->od_dir;
if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
ASSERT3U(error, ==, ENOSPC);
missing++;
} else {
od->od_object = 0;
}
ztest_lr_free(lr, sizeof (*lr), od->od_name);
}
return (missing);
}
static int
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
void *data)
{
lr_write_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
lr->lr_foid = object;
lr->lr_offset = offset;
lr->lr_length = size;
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
bcopy(data, lr + 1, size);
error = ztest_replay_write(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr) + size, NULL);
return (error);
}
static int
ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
lr_truncate_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr), NULL);
lr->lr_foid = object;
lr->lr_offset = offset;
lr->lr_length = size;
error = ztest_replay_truncate(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr), NULL);
return (error);
}
static int
ztest_setattr(ztest_ds_t *zd, uint64_t object)
{
lr_setattr_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr), NULL);
lr->lr_foid = object;
lr->lr_size = 0;
lr->lr_mode = 0;
error = ztest_replay_setattr(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr), NULL);
return (error);
}
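/*
 * Note that the thin wrappers above (ztest_write, ztest_truncate,
 * ztest_setattr) synthesize an in-memory log record and feed it directly to
 * the corresponding ztest_replay_*() handler, so open-context I/O and ZIL
 * replay exercise exactly the same code path.
 */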
static void
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
rl_t *rl;
txg_wait_synced(dmu_objset_pool(os), 0);
ztest_object_lock(zd, object, RL_READER);
rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, object, offset, size);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg != 0) {
dmu_prealloc(os, object, offset, size, tx);
dmu_tx_commit(tx);
txg_wait_synced(dmu_objset_pool(os), txg);
} else {
(void) dmu_free_long_range(os, object, offset, size);
}
ztest_range_unlock(rl);
ztest_object_unlock(zd, object);
}
static void
ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
{
int err;
ztest_block_tag_t wbt;
dmu_object_info_t doi;
enum ztest_io_type io_type;
uint64_t blocksize;
void *data;
VERIFY0(dmu_object_info(zd->zd_os, object, &doi));
blocksize = doi.doi_data_block_size;
data = umem_alloc(blocksize, UMEM_NOFAIL);
/*
* Pick an i/o type at random, biased toward writing block tags.
*/
io_type = ztest_random(ZTEST_IO_TYPES);
if (ztest_random(2) == 0)
io_type = ZTEST_IO_WRITE_TAG;
(void) pthread_rwlock_rdlock(&zd->zd_zilog_lock);
switch (io_type) {
case ZTEST_IO_WRITE_TAG:
ztest_bt_generate(&wbt, zd->zd_os, object, doi.doi_dnodesize,
offset, 0, 0, 0);
(void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
break;
case ZTEST_IO_WRITE_PATTERN:
(void) memset(data, 'a' + (object + offset) % 5, blocksize);
if (ztest_random(2) == 0) {
/*
* Induce fletcher2 collisions to ensure that
* zio_ddt_collision() detects and resolves them
* when using fletcher2-verify for deduplication.
*/
((uint64_t *)data)[0] ^= 1ULL << 63;
((uint64_t *)data)[4] ^= 1ULL << 63;
}
(void) ztest_write(zd, object, offset, blocksize, data);
break;
case ZTEST_IO_WRITE_ZEROES:
bzero(data, blocksize);
(void) ztest_write(zd, object, offset, blocksize, data);
break;
case ZTEST_IO_TRUNCATE:
(void) ztest_truncate(zd, object, offset, blocksize);
break;
case ZTEST_IO_SETATTR:
(void) ztest_setattr(zd, object);
break;
default:
break;
case ZTEST_IO_REWRITE:
(void) pthread_rwlock_rdlock(&ztest_name_lock);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_COMPRESSION,
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
(void) pthread_rwlock_unlock(&ztest_name_lock);
VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
DMU_READ_NO_PREFETCH));
(void) ztest_write(zd, object, offset, blocksize, data);
break;
}
(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
umem_free(data, blocksize);
}
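/*
 * Note on the bias above: ztest_random(ZTEST_IO_TYPES) picks uniformly among
 * the ZTEST_IO_TYPES choices and half the time the result is overridden to
 * ZTEST_IO_WRITE_TAG, so tag writes are selected with probability
 * 1/2 + 1/(2 * ZTEST_IO_TYPES) and every other type with
 * 1/(2 * ZTEST_IO_TYPES).
 */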
/*
* Initialize an object description template.
*/
static void
ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
dmu_object_type_t type, uint64_t blocksize, uint64_t dnodesize,
uint64_t gen)
{
od->od_dir = ZTEST_DIROBJ;
od->od_object = 0;
od->od_crtype = type;
od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
od->od_crdnodesize = dnodesize ? dnodesize : ztest_random_dnodesize();
od->od_crgen = gen;
od->od_type = DMU_OT_NONE;
od->od_blocksize = 0;
od->od_gen = 0;
- (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
- tag, (longlong_t)id, (u_longlong_t)index);
+ (void) snprintf(od->od_name, sizeof (od->od_name),
+ "%s(%"PRId64")[%"PRIu64"]",
+ tag, id, index);
}
/*
* Lookup or create the objects for a test using the od template.
* If the objects do not all exist, or if 'remove' is specified,
* remove any existing objects and create new ones. Otherwise,
* use the existing objects.
*/
static int
ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
{
int count = size / sizeof (*od);
int rv = 0;
mutex_enter(&zd->zd_dirobj_lock);
if ((ztest_lookup(zd, od, count) != 0 || remove) &&
(ztest_remove(zd, od, count) != 0 ||
ztest_create(zd, od, count) != 0))
rv = -1;
zd->zd_od = od;
mutex_exit(&zd->zd_dirobj_lock);
return (rv);
}
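/*
 * A minimal sketch (illustrative only) of the object-setup pattern the test
 * cases below follow, built solely from the helpers above; a blocksize and
 * dnodesize of 0 request random values, and the payload is arbitrary.
 */
static void
ztest_od_usage_sketch(ztest_ds_t *zd, uint64_t id)
{
	ztest_od_t *od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
	uint64_t data = 42;

	ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);

	/* Look up or (re)create the object described by the template. */
	if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) == 0)
		(void) ztest_write(zd, od->od_object, 0, sizeof (data), &data);

	umem_free(od, sizeof (ztest_od_t));
}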
/* ARGSUSED */
void
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
{
zilog_t *zilog = zd->zd_zilog;
(void) pthread_rwlock_rdlock(&zd->zd_zilog_lock);
zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
/*
* Remember the committed values in zd, which is in parent/child
* shared memory. If we die, the next iteration of ztest_run()
* will verify that the log really does contain this record.
*/
mutex_enter(&zilog->zl_lock);
ASSERT3P(zd->zd_shared, !=, NULL);
ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
mutex_exit(&zilog->zl_lock);
(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
}
/*
* This function is designed to simulate the operations that occur during a
* mount/unmount operation. We hold the dataset across these operations in an
* attempt to expose any implicit assumptions about ZIL management.
*/
/* ARGSUSED */
void
ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
/*
* We hold the ztest_vdev_lock so we don't cause problems with
* other threads that wish to remove a log device, such as
* ztest_device_removal().
*/
mutex_enter(&ztest_vdev_lock);
/*
* We grab the zd_dirobj_lock to ensure that no other thread is
* updating the zil (i.e. adding in-memory log records) and the
* zd_zilog_lock to block any I/O.
*/
mutex_enter(&zd->zd_dirobj_lock);
(void) pthread_rwlock_wrlock(&zd->zd_zilog_lock);
/* zfsvfs_teardown() */
zil_close(zd->zd_zilog);
/* zfsvfs_setup() */
VERIFY3P(zil_open(os, ztest_get_data), ==, zd->zd_zilog);
zil_replay(os, zd, ztest_replay_vector);
(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
mutex_exit(&zd->zd_dirobj_lock);
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that we can't destroy an active pool, create an existing pool,
* or create a pool with a bad vdev spec.
*/
/* ARGSUSED */
void
ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_opts_t *zo = &ztest_opts;
spa_t *spa;
nvlist_t *nvroot;
if (zo->zo_mmp_test)
return;
/*
* Attempt to create using a bad file.
*/
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 0, 1);
VERIFY3U(ENOENT, ==,
spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
fnvlist_free(nvroot);
/*
* Attempt to create using a bad mirror.
*/
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 2, 1);
VERIFY3U(ENOENT, ==,
spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
fnvlist_free(nvroot);
/*
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
(void) pthread_rwlock_rdlock(&ztest_name_lock);
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, NULL, 0, 0, 1);
VERIFY3U(EEXIST, ==,
spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL));
fnvlist_free(nvroot);
/*
* We open a reference to the spa and then we try to export it
* expecting one of the following errors:
*
* EBUSY
* Because of the reference we just opened.
*
* ZFS_ERR_EXPORT_IN_PROGRESS
* For the case that there is another ztest thread doing
* an export concurrently.
*/
VERIFY0(spa_open(zo->zo_pool, &spa, FTAG));
int error = spa_destroy(zo->zo_pool);
if (error != EBUSY && error != ZFS_ERR_EXPORT_IN_PROGRESS) {
- fatal(0, "spa_destroy(%s) returned unexpected value %d",
+ fatal(B_FALSE, "spa_destroy(%s) returned unexpected value %d",
spa->spa_name, error);
}
spa_close(spa, FTAG);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/*
* Start and then stop the MMP threads to ensure the startup and shutdown code
* works properly. Actual protection and property-related code is tested via ZTS.
*/
/* ARGSUSED */
void
ztest_mmp_enable_disable(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_opts_t *zo = &ztest_opts;
spa_t *spa = ztest_spa;
if (zo->zo_mmp_test)
return;
/*
* Since enabling MMP involves setting a property, it cannot be done
* while the pool is suspended.
*/
if (spa_suspended(spa))
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&spa->spa_props_lock);
zfs_multihost_fail_intervals = 0;
if (!spa_multihost(spa)) {
spa->spa_multihost = B_TRUE;
mmp_thread_start(spa);
}
mutex_exit(&spa->spa_props_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
mmp_signal_all_threads();
txg_wait_synced(spa_get_dsl(spa), 0);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&spa->spa_props_lock);
if (spa_multihost(spa)) {
mmp_thread_stop(spa);
spa->spa_multihost = B_FALSE;
}
mutex_exit(&spa->spa_props_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/* ARGSUSED */
void
ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa;
uint64_t initial_version = SPA_VERSION_INITIAL;
uint64_t version, newversion;
nvlist_t *nvroot, *props;
char *name;
if (ztest_opts.zo_mmp_test)
return;
/* dRAID added after feature flags, skip upgrade test. */
if (strcmp(ztest_opts.zo_raid_type, VDEV_TYPE_DRAID) == 0)
return;
mutex_enter(&ztest_vdev_lock);
name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
/*
* Clean up from previous runs.
*/
(void) spa_destroy(name);
nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
NULL, ztest_opts.zo_raid_children, ztest_opts.zo_mirrors, 1);
/*
* If we're configuring a RAIDZ device then make sure that the
* initial version is capable of supporting that feature.
*/
switch (ztest_opts.zo_raid_parity) {
case 0:
case 1:
initial_version = SPA_VERSION_INITIAL;
break;
case 2:
initial_version = SPA_VERSION_RAIDZ2;
break;
case 3:
initial_version = SPA_VERSION_RAIDZ3;
break;
}
/*
* Create a pool with a spa version that can be upgraded. Pick
* a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
*/
do {
version = ztest_random_spa_version(initial_version);
} while (version > SPA_VERSION_BEFORE_FEATURES);
props = fnvlist_alloc();
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), version);
VERIFY0(spa_create(name, nvroot, props, NULL, NULL));
fnvlist_free(nvroot);
fnvlist_free(props);
VERIFY0(spa_open(name, &spa, FTAG));
VERIFY3U(spa_version(spa), ==, version);
newversion = ztest_random_spa_version(version + 1);
if (ztest_opts.zo_verbose >= 4) {
- (void) printf("upgrading spa version from %llu to %llu\n",
- (u_longlong_t)version, (u_longlong_t)newversion);
+ (void) printf("upgrading spa version from "
+ "%"PRIu64" to %"PRIu64"\n",
+ version, newversion);
}
spa_upgrade(spa, newversion);
VERIFY3U(spa_version(spa), >, version);
VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
zpool_prop_to_name(ZPOOL_PROP_VERSION)));
spa_close(spa, FTAG);
kmem_strfree(name);
mutex_exit(&ztest_vdev_lock);
}
static void
ztest_spa_checkpoint(spa_t *spa)
{
ASSERT(MUTEX_HELD(&ztest_checkpoint_lock));
int error = spa_checkpoint(spa->spa_name);
switch (error) {
case 0:
case ZFS_ERR_DEVRM_IN_PROGRESS:
case ZFS_ERR_DISCARDING_CHECKPOINT:
case ZFS_ERR_CHECKPOINT_EXISTS:
break;
case ENOSPC:
ztest_record_enospc(FTAG);
break;
default:
- fatal(0, "spa_checkpoint(%s) = %d", spa->spa_name, error);
+ fatal(B_FALSE, "spa_checkpoint(%s) = %d", spa->spa_name, error);
}
}
static void
ztest_spa_discard_checkpoint(spa_t *spa)
{
ASSERT(MUTEX_HELD(&ztest_checkpoint_lock));
int error = spa_checkpoint_discard(spa->spa_name);
switch (error) {
case 0:
case ZFS_ERR_DISCARDING_CHECKPOINT:
case ZFS_ERR_NO_CHECKPOINT:
break;
default:
- fatal(0, "spa_discard_checkpoint(%s) = %d",
+ fatal(B_FALSE, "spa_discard_checkpoint(%s) = %d",
spa->spa_name, error);
}
}
/* ARGSUSED */
void
ztest_spa_checkpoint_create_discard(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
mutex_enter(&ztest_checkpoint_lock);
if (ztest_random(2) == 0) {
ztest_spa_checkpoint(spa);
} else {
ztest_spa_discard_checkpoint(spa);
}
mutex_exit(&ztest_checkpoint_lock);
}
static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *path)
{
vdev_t *mvd;
int c;
if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
return (vd);
for (c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
NULL)
return (mvd);
return (NULL);
}
static int
spa_num_top_vdevs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT3U(spa_config_held(spa, SCL_VDEV, RW_READER), ==, SCL_VDEV);
return (rvd->vdev_children);
}
/*
* Verify that vdev_add() works as expected.
*/
/* ARGSUSED */
void
ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
uint64_t leaves;
uint64_t guid;
nvlist_t *nvroot;
int error;
if (ztest_opts.zo_mmp_test)
return;
mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) *
ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;
/*
* If we have slogs then remove them 1/4 of the time.
*/
if (spa_has_slogs(spa) && ztest_random(4) == 0) {
metaslab_group_t *mg;
/*
* find the first real slog in log allocation class
*/
mg = spa_log_class(spa)->mc_allocator[0].mca_rotor;
while (!mg->mg_vd->vdev_islog)
mg = mg->mg_next;
guid = mg->mg_vd->vdev_guid;
spa_config_exit(spa, SCL_VDEV, FTAG);
/*
* We have to grab the zs_name_lock as writer to
* prevent a race between removing a slog (dmu_objset_find)
* and destroying a dataset. Removing the slog will
* grab a reference on the dataset which may cause
* dsl_destroy_head() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
pthread_rwlock_wrlock(&ztest_name_lock);
error = spa_vdev_remove(spa, guid, B_FALSE);
pthread_rwlock_unlock(&ztest_name_lock);
switch (error) {
case 0:
case EEXIST: /* Generic zil_reset() error */
case EBUSY: /* Replay required */
case EACCES: /* Crypto key not loaded */
case ZFS_ERR_CHECKPOINT_EXISTS:
case ZFS_ERR_DISCARDING_CHECKPOINT:
break;
default:
- fatal(0, "spa_vdev_remove() = %d", error);
+ fatal(B_FALSE, "spa_vdev_remove() = %d", error);
}
} else {
spa_config_exit(spa, SCL_VDEV, FTAG);
/*
* Make 1/4 of the devices be log devices
*/
nvroot = make_vdev_root(NULL, NULL, NULL,
ztest_opts.zo_vdev_size, 0, (ztest_random(4) == 0) ?
"log" : NULL, ztest_opts.zo_raid_children, zs->zs_mirrors,
1);
error = spa_vdev_add(spa, nvroot);
fnvlist_free(nvroot);
switch (error) {
case 0:
break;
case ENOSPC:
ztest_record_enospc("spa_vdev_add");
break;
default:
- fatal(0, "spa_vdev_add() = %d", error);
+ fatal(B_FALSE, "spa_vdev_add() = %d", error);
}
}
mutex_exit(&ztest_vdev_lock);
}
/* ARGSUSED */
void
ztest_vdev_class_add(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
uint64_t leaves;
nvlist_t *nvroot;
const char *class = (ztest_random(2) == 0) ?
VDEV_ALLOC_BIAS_SPECIAL : VDEV_ALLOC_BIAS_DEDUP;
int error;
/*
* By default add a special vdev 50% of the time
*/
if ((ztest_opts.zo_special_vdevs == ZTEST_VDEV_CLASS_OFF) ||
(ztest_opts.zo_special_vdevs == ZTEST_VDEV_CLASS_RND &&
ztest_random(2) == 0)) {
return;
}
mutex_enter(&ztest_vdev_lock);
/* Only test with mirrors */
if (zs->zs_mirrors < 2) {
mutex_exit(&ztest_vdev_lock);
return;
}
/* requires feature@allocation_classes */
if (!spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES)) {
mutex_exit(&ztest_vdev_lock);
return;
}
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) *
ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;
spa_config_exit(spa, SCL_VDEV, FTAG);
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
class, ztest_opts.zo_raid_children, zs->zs_mirrors, 1);
error = spa_vdev_add(spa, nvroot);
fnvlist_free(nvroot);
if (error == ENOSPC)
ztest_record_enospc("spa_vdev_add");
else if (error != 0)
- fatal(0, "spa_vdev_add() = %d", error);
+ fatal(B_FALSE, "spa_vdev_add() = %d", error);
/*
* 50% of the time allow small blocks in the special class
*/
if (error == 0 &&
spa_special_class(spa)->mc_groups == 1 && ztest_random(2) == 0) {
if (ztest_opts.zo_verbose >= 3)
(void) printf("Enabling special VDEV small blocks\n");
(void) ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_SPECIAL_SMALL_BLOCKS, 32768, B_FALSE);
}
mutex_exit(&ztest_vdev_lock);
if (ztest_opts.zo_verbose >= 3) {
metaslab_class_t *mc;
if (strcmp(class, VDEV_ALLOC_BIAS_SPECIAL) == 0)
mc = spa_special_class(spa);
else
mc = spa_dedup_class(spa);
(void) printf("Added a %s mirrored vdev (of %d)\n",
class, (int)mc->mc_groups);
}
}
/*
* Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
*/
/* ARGSUSED */
void
ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
vdev_t *rvd = spa->spa_root_vdev;
spa_aux_vdev_t *sav;
char *aux;
char *path;
uint64_t guid = 0;
int error, ignore_err = 0;
if (ztest_opts.zo_mmp_test)
return;
path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
if (ztest_random(2) == 0) {
sav = &spa->spa_spares;
aux = ZPOOL_CONFIG_SPARES;
} else {
sav = &spa->spa_l2cache;
aux = ZPOOL_CONFIG_L2CACHE;
}
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
if (sav->sav_count != 0 && ztest_random(4) == 0) {
/*
* Pick a random device to remove.
*/
vdev_t *svd = sav->sav_vdevs[ztest_random(sav->sav_count)];
/* dRAID spares cannot be removed; try anyway to see ENOTSUP */
if (strstr(svd->vdev_path, VDEV_TYPE_DRAID) != NULL)
ignore_err = ENOTSUP;
guid = svd->vdev_guid;
} else {
/*
* Find an unused device we can add.
*/
zs->zs_vdev_aux = 0;
for (;;) {
int c;
(void) snprintf(path, MAXPATHLEN, ztest_aux_template,
ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
zs->zs_vdev_aux);
for (c = 0; c < sav->sav_count; c++)
if (strcmp(sav->sav_vdevs[c]->vdev_path,
path) == 0)
break;
if (c == sav->sav_count &&
vdev_lookup_by_path(rvd, path) == NULL)
break;
zs->zs_vdev_aux++;
}
}
spa_config_exit(spa, SCL_VDEV, FTAG);
if (guid == 0) {
/*
* Add a new device.
*/
nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
(ztest_opts.zo_vdev_size * 5) / 4, 0, NULL, 0, 0, 1);
error = spa_vdev_add(spa, nvroot);
switch (error) {
case 0:
break;
default:
- fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
+ fatal(B_FALSE, "spa_vdev_add(%p) = %d", nvroot, error);
}
fnvlist_free(nvroot);
} else {
/*
* Remove an existing device. Sometimes, dirty its
* vdev state first to make sure we handle removal
* of devices that have pending state changes.
*/
if (ztest_random(2) == 0)
(void) vdev_online(spa, guid, 0, NULL);
error = spa_vdev_remove(spa, guid, B_FALSE);
switch (error) {
case 0:
case EBUSY:
case ZFS_ERR_CHECKPOINT_EXISTS:
case ZFS_ERR_DISCARDING_CHECKPOINT:
break;
default:
if (error != ignore_err)
- fatal(0, "spa_vdev_remove(%llu) = %d", guid,
- error);
+ fatal(B_FALSE,
+ "spa_vdev_remove(%"PRIu64") = %d",
+ guid, error);
}
}
mutex_exit(&ztest_vdev_lock);
umem_free(path, MAXPATHLEN);
}
/*
* split a pool if it has mirror tlvdevs
*/
/* ARGSUSED */
void
ztest_split_pool(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *tree, **child, *config, *split, **schild;
uint_t c, children, schildren = 0, lastlogid = 0;
int error = 0;
if (ztest_opts.zo_mmp_test)
return;
mutex_enter(&ztest_vdev_lock);
/* ensure we have a usable config; mirrors of raidz aren't supported */
if (zs->zs_mirrors < 3 || ztest_opts.zo_raid_children > 1) {
mutex_exit(&ztest_vdev_lock);
return;
}
/* clean up the old pool, if any */
(void) spa_destroy("splitp");
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/* generate a config from the existing config */
mutex_enter(&spa->spa_props_lock);
tree = fnvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE);
mutex_exit(&spa->spa_props_lock);
VERIFY0(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&child, &children));
schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
for (c = 0; c < children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
nvlist_t **mchild;
uint_t mchildren;
if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
schild[schildren] = fnvlist_alloc();
fnvlist_add_string(schild[schildren],
ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE);
fnvlist_add_uint64(schild[schildren],
ZPOOL_CONFIG_IS_HOLE, 1);
if (lastlogid == 0)
lastlogid = schildren;
++schildren;
continue;
}
lastlogid = 0;
VERIFY0(nvlist_lookup_nvlist_array(child[c],
ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren));
schild[schildren++] = fnvlist_dup(mchild[0]);
}
/* OK, create a config that can be used to split */
split = fnvlist_alloc();
fnvlist_add_string(split, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
fnvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
lastlogid != 0 ? lastlogid : schildren);
config = fnvlist_alloc();
fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split);
for (c = 0; c < schildren; c++)
fnvlist_free(schild[c]);
free(schild);
fnvlist_free(split);
spa_config_exit(spa, SCL_VDEV, FTAG);
(void) pthread_rwlock_wrlock(&ztest_name_lock);
error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
(void) pthread_rwlock_unlock(&ztest_name_lock);
fnvlist_free(config);
if (error == 0) {
(void) printf("successful split - results:\n");
mutex_enter(&spa_namespace_lock);
show_pool_stats(spa);
show_pool_stats(spa_lookup("splitp"));
mutex_exit(&spa_namespace_lock);
++zs->zs_splits;
--zs->zs_mirrors;
}
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that we can attach and detach devices.
*/
/* ARGSUSED */
void
ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
spa_aux_vdev_t *sav = &spa->spa_spares;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *pvd;
nvlist_t *root;
uint64_t leaves;
uint64_t leaf, top;
uint64_t ashift = ztest_get_ashift();
uint64_t oldguid, pguid;
uint64_t oldsize, newsize;
char *oldpath, *newpath;
int replacing;
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int newvd_is_dspare = B_FALSE;
int oldvd_is_log;
int error, expected_error;
if (ztest_opts.zo_mmp_test)
return;
oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* If a vdev is in the process of being removed, its removal may
* finish while we are in progress, leading to an unexpected error
* value. Don't bother trying to attach while we are in the middle
* of removal.
*/
if (ztest_device_removal_active) {
spa_config_exit(spa, SCL_ALL, FTAG);
goto out;
}
/*
* Decide whether to do an attach or a replace.
*/
replacing = ztest_random(2);
/*
* Pick a random top-level vdev.
*/
top = ztest_random_vdev_top(spa, B_TRUE);
/*
* Pick a random leaf within it.
*/
leaf = ztest_random(leaves);
/*
* Locate this vdev.
*/
oldvd = rvd->vdev_child[top];
/* pick a child from the mirror */
if (zs->zs_mirrors >= 1) {
ASSERT3P(oldvd->vdev_ops, ==, &vdev_mirror_ops);
ASSERT3U(oldvd->vdev_children, >=, zs->zs_mirrors);
oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raid_children];
}
/* pick a child out of the raidz group */
if (ztest_opts.zo_raid_children > 1) {
if (strcmp(oldvd->vdev_ops->vdev_op_type, "raidz") == 0)
ASSERT3P(oldvd->vdev_ops, ==, &vdev_raidz_ops);
else
ASSERT3P(oldvd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(oldvd->vdev_children, ==, ztest_opts.zo_raid_children);
oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raid_children];
}
/*
* If we're already doing an attach or replace, oldvd may be a
* mirror vdev -- in which case, pick a random child.
*/
while (oldvd->vdev_children != 0) {
oldvd_has_siblings = B_TRUE;
ASSERT3U(oldvd->vdev_children, >=, 2);
oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
}
oldguid = oldvd->vdev_guid;
oldsize = vdev_get_min_asize(oldvd);
oldvd_is_log = oldvd->vdev_top->vdev_islog;
(void) strcpy(oldpath, oldvd->vdev_path);
pvd = oldvd->vdev_parent;
pguid = pvd->vdev_guid;
/*
* If oldvd has siblings, then half of the time, detach it. Prior
* to the detach the pool is scrubbed in order to prevent creating
* unrepairable blocks as a result of the data corruption injection.
*/
if (oldvd_has_siblings && ztest_random(2) == 0) {
spa_config_exit(spa, SCL_ALL, FTAG);
error = ztest_scrub_impl(spa);
if (error)
goto out;
error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP && error != ZFS_ERR_CHECKPOINT_EXISTS &&
error != ZFS_ERR_DISCARDING_CHECKPOINT)
- fatal(0, "detach (%s) returned %d", oldpath, error);
+ fatal(B_FALSE, "detach (%s) returned %d",
+ oldpath, error);
goto out;
}
/*
* For the new vdev, choose with equal probability between the two
* standard paths (ending in either 'a' or 'b') or a random hot spare.
*/
if (sav->sav_count != 0 && ztest_random(3) == 0) {
newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
newvd_is_spare = B_TRUE;
if (newvd->vdev_ops == &vdev_draid_spare_ops)
newvd_is_dspare = B_TRUE;
(void) strcpy(newpath, newvd->vdev_path);
} else {
(void) snprintf(newpath, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + leaf);
if (ztest_random(2) == 0)
newpath[strlen(newpath) - 1] = 'b';
newvd = vdev_lookup_by_path(rvd, newpath);
}
if (newvd) {
/*
* Reopen to ensure the vdev's asize field isn't stale.
*/
vdev_reopen(newvd);
newsize = vdev_get_min_asize(newvd);
} else {
/*
* Make newsize a little bigger or smaller than oldsize.
* If it's smaller, the attach should fail.
* If it's larger, and we're doing a replace,
* we should get dynamic LUN growth when we're done.
*/
newsize = 10 * oldsize / (9 + ztest_random(3));
}
/*
* If pvd is not a mirror or root, the attach should fail with ENOTSUP,
* unless it's a replace; in that case any non-replacing parent is OK.
*
* If newvd is already part of the pool, it should fail with EBUSY.
*
* If newvd is too small, it should fail with EOVERFLOW.
*
* If newvd is a distributed spare and it's being attached to a
* dRAID which is not its parent it should fail with EINVAL.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops && (!replacing ||
pvd->vdev_ops == &vdev_replacing_ops ||
pvd->vdev_ops == &vdev_spare_ops))
expected_error = ENOTSUP;
else if (newvd_is_spare && (!replacing || oldvd_is_log))
expected_error = ENOTSUP;
else if (newvd == oldvd)
expected_error = replacing ? 0 : EBUSY;
else if (vdev_lookup_by_path(rvd, newpath) != NULL)
expected_error = EBUSY;
else if (!newvd_is_dspare && newsize < oldsize)
expected_error = EOVERFLOW;
else if (ashift > oldvd->vdev_top->vdev_ashift)
expected_error = EDOM;
else if (newvd_is_dspare && pvd != vdev_draid_spare_get_parent(newvd))
expected_error = ENOTSUP;
else
expected_error = 0;
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* Build the nvlist describing newpath.
*/
root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0,
ashift, NULL, 0, 0, 1);
/*
* When supported select either a healing or sequential resilver.
*/
boolean_t rebuilding = B_FALSE;
if (pvd->vdev_ops == &vdev_mirror_ops ||
pvd->vdev_ops == &vdev_root_ops) {
rebuilding = !!ztest_random(2);
}
error = spa_vdev_attach(spa, oldguid, root, replacing, rebuilding);
fnvlist_free(root);
/*
* If our parent was the replacing vdev, but the replace completed,
* then instead of failing with ENOTSUP we may either succeed,
* fail with ENODEV, or fail with EOVERFLOW.
*/
if (expected_error == ENOTSUP &&
(error == 0 || error == ENODEV || error == EOVERFLOW))
expected_error = error;
/*
* If someone grew the LUN, the replacement may be too small.
*/
if (error == EOVERFLOW || error == EBUSY)
expected_error = error;
if (error == ZFS_ERR_CHECKPOINT_EXISTS ||
error == ZFS_ERR_DISCARDING_CHECKPOINT ||
error == ZFS_ERR_RESILVER_IN_PROGRESS ||
error == ZFS_ERR_REBUILD_IN_PROGRESS)
expected_error = error;
if (error != expected_error && expected_error != EBUSY) {
- fatal(0, "attach (%s %llu, %s %llu, %d) "
+ fatal(B_FALSE, "attach (%s %"PRIu64", %s %"PRIu64", %d) "
"returned %d, expected %d",
oldpath, oldsize, newpath,
newsize, replacing, error, expected_error);
}
out:
mutex_exit(&ztest_vdev_lock);
umem_free(oldpath, MAXPATHLEN);
umem_free(newpath, MAXPATHLEN);
}
/* ARGSUSED */
void
ztest_device_removal(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
vdev_t *vd;
uint64_t guid;
int error;
mutex_enter(&ztest_vdev_lock);
if (ztest_device_removal_active) {
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* Remove a random top-level vdev and wait for removal to finish.
*/
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(spa, ztest_random_vdev_top(spa, B_FALSE));
guid = vd->vdev_guid;
spa_config_exit(spa, SCL_VDEV, FTAG);
error = spa_vdev_remove(spa, guid, B_FALSE);
if (error == 0) {
ztest_device_removal_active = B_TRUE;
mutex_exit(&ztest_vdev_lock);
/*
* spa->spa_vdev_removal is created in a sync task that
* is initiated via dsl_sync_task_nowait(). Since the
* task may not run before spa_vdev_remove() returns, we
* must wait at least 1 txg to ensure that the removal
* struct has been created.
*/
txg_wait_synced(spa_get_dsl(spa), 0);
while (spa->spa_removing_phys.sr_state == DSS_SCANNING)
txg_wait_synced(spa_get_dsl(spa), 0);
} else {
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* The pool needs to be scrubbed after completing device removal.
* Failure to do so may result in checksum errors due to the
* strategy employed by ztest_fault_inject() when selecting which
* offsets are redundant and can be damaged.
*/
error = spa_scan(spa, POOL_SCAN_SCRUB);
if (error == 0) {
while (dsl_scan_scrubbing(spa_get_dsl(spa)))
txg_wait_synced(spa_get_dsl(spa), 0);
}
mutex_enter(&ztest_vdev_lock);
ztest_device_removal_active = B_FALSE;
mutex_exit(&ztest_vdev_lock);
}
/*
* Callback function which expands the physical size of the vdev.
*/
static vdev_t *
grow_vdev(vdev_t *vd, void *arg)
{
spa_t *spa __maybe_unused = vd->vdev_spa;
size_t *newsize = arg;
size_t fsize;
int fd;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), ==, SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
return (vd);
fsize = lseek(fd, 0, SEEK_END);
VERIFY0(ftruncate(fd, *newsize));
if (ztest_opts.zo_verbose >= 6) {
(void) printf("%s grew from %lu to %lu bytes\n",
vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
}
(void) close(fd);
return (NULL);
}
/*
* Callback function which expands a given vdev by calling vdev_online().
*/
/* ARGSUSED */
static vdev_t *
online_vdev(vdev_t *vd, void *arg)
{
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint64_t guid = vd->vdev_guid;
uint64_t generation = spa->spa_config_generation + 1;
vdev_state_t newstate = VDEV_STATE_UNKNOWN;
int error;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), ==, SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
/* Calling vdev_online will initialize the new metaslabs */
spa_config_exit(spa, SCL_STATE, spa);
error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
/*
* If vdev_online returned an error or the underlying vdev_open
* failed then we abort the expand. The only way to know that
* vdev_open fails is by checking the returned newstate.
*/
if (error || newstate != VDEV_STATE_HEALTHY) {
if (ztest_opts.zo_verbose >= 5) {
- (void) printf("Unable to expand vdev, state %llu, "
- "error %d\n", (u_longlong_t)newstate, error);
+ (void) printf("Unable to expand vdev, state %u, "
+ "error %d\n", newstate, error);
}
return (vd);
}
ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
/*
* Since we dropped the lock we need to ensure that we're
* still talking to the original vdev. It's possible this
* vdev may have been detached/replaced while we were
* trying to online it.
*/
if (generation != spa->spa_config_generation) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("vdev configuration has changed, "
- "guid %llu, state %llu, expected gen %llu, "
- "got gen %llu\n",
- (u_longlong_t)guid,
- (u_longlong_t)tvd->vdev_state,
- (u_longlong_t)generation,
- (u_longlong_t)spa->spa_config_generation);
+ "guid %"PRIu64", state %"PRIu64", "
+ "expected gen %"PRIu64", got gen %"PRIu64"\n",
+ guid,
+ tvd->vdev_state,
+ generation,
+ spa->spa_config_generation);
}
return (vd);
}
return (NULL);
}
/*
* Traverse the vdev tree calling the supplied function.
* We continue to walk the tree until we either have walked all
* children or we receive a non-NULL return from the callback.
* If a NULL callback is passed, then we just return back the first
* leaf vdev we encounter.
*/
static vdev_t *
vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
{
uint_t c;
if (vd->vdev_ops->vdev_op_leaf) {
if (func == NULL)
return (vd);
else
return (func(vd, arg));
}
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
return (cvd);
}
return (NULL);
}
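/*
 * Usage sketch for the walker above, relying only on its documented
 * contract: returning non-NULL from the callback stops the walk at that
 * leaf, returning NULL continues it. The helper name is hypothetical, e.g.
 * vdev_t *bad = vdev_walk_tree(tvd, ztest_first_unhealthy_leaf, NULL);
 */
/* ARGSUSED */
static vdev_t *
ztest_first_unhealthy_leaf(vdev_t *vd, void *arg)
{
	return (vd->vdev_state != VDEV_STATE_HEALTHY ? vd : NULL);
}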
/*
* Verify that dynamic LUN growth works as expected.
*/
/* ARGSUSED */
void
ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
vdev_t *vd, *tvd;
metaslab_class_t *mc;
metaslab_group_t *mg;
size_t psize, newsize;
uint64_t top;
uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
mutex_enter(&ztest_checkpoint_lock);
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
/*
* If there is a vdev removal in progress, it could complete while
* we are running, in which case we would not be able to verify
* that the metaslab_class space increased (because it decreases
* when the device removal completes).
*/
if (ztest_device_removal_active) {
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
top = ztest_random_vdev_top(spa, B_TRUE);
tvd = spa->spa_root_vdev->vdev_child[top];
mg = tvd->vdev_mg;
mc = mg->mg_class;
old_ms_count = tvd->vdev_ms_count;
old_class_space = metaslab_class_get_space(mc);
/*
* Determine the size of the first leaf vdev associated with
* our top-level device.
*/
vd = vdev_walk_tree(tvd, NULL, NULL);
ASSERT3P(vd, !=, NULL);
ASSERT(vd->vdev_ops->vdev_op_leaf);
psize = vd->vdev_psize;
/*
* We only try to expand the vdev if it's healthy, less than 4x its
* original size, and it has a valid psize.
*/
if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
ASSERT3U(psize, >, 0);
newsize = psize + MAX(psize / 8, SPA_MAXBLOCKSIZE);
ASSERT3U(newsize, >, psize);
if (ztest_opts.zo_verbose >= 6) {
(void) printf("Expanding LUN %s from %lu to %lu\n",
vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
}
/*
* Growing the vdev is a two step process:
* 1). expand the physical size (i.e. relabel)
* 2). online the vdev to create the new metaslabs
*/
if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
tvd->vdev_state != VDEV_STATE_HEALTHY) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not expand LUN because "
"the vdev configuration changed.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
spa_config_exit(spa, SCL_STATE, spa);
/*
* Expanding the LUN will update the config asynchronously,
* thus we must wait for the async thread to complete any
* pending tasks before proceeding.
*/
for (;;) {
boolean_t done;
mutex_enter(&spa->spa_async_lock);
done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
mutex_exit(&spa->spa_async_lock);
if (done)
break;
txg_wait_synced(spa_get_dsl(spa), 0);
(void) poll(NULL, 0, 100);
}
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
tvd = spa->spa_root_vdev->vdev_child[top];
new_ms_count = tvd->vdev_ms_count;
new_class_space = metaslab_class_get_space(mc);
if (tvd->vdev_mg != mg || mg->mg_class != mc) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not verify LUN expansion due to "
"intervening vdev offline or remove.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
return;
}
/*
* Make sure we were able to grow the vdev.
*/
if (new_ms_count <= old_ms_count) {
- fatal(0, "LUN expansion failed: ms_count %llu < %llu\n",
+ fatal(B_FALSE,
+ "LUN expansion failed: ms_count %"PRIu64" < %"PRIu64"\n",
old_ms_count, new_ms_count);
}
/*
* Make sure we were able to grow the pool.
*/
if (new_class_space <= old_class_space) {
- fatal(0, "LUN expansion failed: class_space %llu < %llu\n",
+ fatal(B_FALSE,
+ "LUN expansion failed: class_space %"PRIu64" < %"PRIu64"\n",
old_class_space, new_class_space);
}
if (ztest_opts.zo_verbose >= 5) {
char oldnumbuf[NN_NUMBUF_SZ], newnumbuf[NN_NUMBUF_SZ];
nicenum(old_class_space, oldnumbuf, sizeof (oldnumbuf));
nicenum(new_class_space, newnumbuf, sizeof (newnumbuf));
(void) printf("%s grew from %s to %s\n",
spa->spa_name, oldnumbuf, newnumbuf);
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
mutex_exit(&ztest_checkpoint_lock);
}
/*
* Verify that dmu_objset_{create,destroy,open,close} work as expected.
*/
/* ARGSUSED */
static void
ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
/*
* Create the objects common to all ztest datasets.
*/
VERIFY0(zap_create_claim(os, ZTEST_DIROBJ,
DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx));
}
static int
ztest_dataset_create(char *dsname)
{
int err;
uint64_t rand;
dsl_crypto_params_t *dcp = NULL;
/*
* 50% of the time, we create encrypted datasets
* using a random cipher suite and a hard-coded
* wrapping key.
*/
rand = ztest_random(2);
if (rand != 0) {
nvlist_t *crypto_args = fnvlist_alloc();
nvlist_t *props = fnvlist_alloc();
/* slight bias towards the default cipher suite */
rand = ztest_random(ZIO_CRYPT_FUNCTIONS);
if (rand < ZIO_CRYPT_AES_128_CCM)
rand = ZIO_CRYPT_ON;
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), rand);
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)ztest_wkeydata, WRAPPING_KEY_LEN);
/*
* These parameters aren't really used by the kernel. They
* are simply stored so that userspace knows how to load
* the wrapping key.
*/
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), ZFS_KEYFORMAT_RAW);
fnvlist_add_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), "prompt");
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 0ULL);
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 0ULL);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, props,
crypto_args, &dcp));
/*
* Cycle through all available encryption implementations
* to verify interoperability.
*/
VERIFY0(gcm_impl_set("cycle"));
VERIFY0(aes_impl_set("cycle"));
fnvlist_free(crypto_args);
fnvlist_free(props);
}
err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, dcp,
ztest_objset_create_cb, NULL);
dsl_crypto_params_free(dcp, !!err);
rand = ztest_random(100);
if (err || rand < 80)
return (err);
if (ztest_opts.zo_verbose >= 5)
(void) printf("Setting dataset %s to sync always\n", dsname);
return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
ZFS_SYNC_ALWAYS, B_FALSE));
}
/* ARGSUSED */
static int
ztest_objset_destroy_cb(const char *name, void *arg)
{
objset_t *os;
dmu_object_info_t doi;
int error;
/*
* Verify that the dataset contains a directory object.
*/
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
B_TRUE, FTAG, &os));
error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
if (error != ENOENT) {
/* We could have crashed in the middle of destroying it */
ASSERT0(error);
ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
}
dmu_objset_disown(os, B_TRUE, FTAG);
/*
* Destroy the dataset.
*/
if (strchr(name, '@') != NULL) {
VERIFY0(dsl_destroy_snapshot(name, B_TRUE));
} else {
error = dsl_destroy_head(name);
if (error == ENOSPC) {
/* There could be a checkpoint or insufficient slop */
ztest_record_enospc(FTAG);
} else if (error != EBUSY) {
/* There could be a hold on this dataset */
ASSERT0(error);
}
}
return (0);
}
static boolean_t
ztest_snapshot_create(char *osname, uint64_t id)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int error;
- (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id);
+ (void) snprintf(snapname, sizeof (snapname), "%"PRIu64"", id);
error = dmu_objset_snapshot_one(osname, snapname);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (B_FALSE);
}
if (error != 0 && error != EEXIST) {
- fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname,
+ fatal(B_FALSE, "ztest_snapshot_create(%s@%s) = %d", osname,
snapname, error);
}
return (B_TRUE);
}
static boolean_t
ztest_snapshot_destroy(char *osname, uint64_t id)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int error;
- (void) snprintf(snapname, sizeof (snapname), "%s@%llu", osname,
- (u_longlong_t)id);
+ (void) snprintf(snapname, sizeof (snapname), "%s@%"PRIu64"",
+ osname, id);
error = dsl_destroy_snapshot(snapname, B_FALSE);
if (error != 0 && error != ENOENT)
- fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
+ fatal(B_FALSE, "ztest_snapshot_destroy(%s) = %d",
+ snapname, error);
return (B_TRUE);
}
/* ARGSUSED */
void
ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
{
ztest_ds_t *zdtmp;
int iters;
int error;
objset_t *os, *os2;
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog_t *zilog;
int i;
zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
(void) pthread_rwlock_rdlock(&ztest_name_lock);
- (void) snprintf(name, sizeof (name), "%s/temp_%llu",
- ztest_opts.zo_pool, (u_longlong_t)id);
+ (void) snprintf(name, sizeof (name), "%s/temp_%"PRIu64"",
+ ztest_opts.zo_pool, id);
/*
* If this dataset exists from a previous run, process its replay log
* half of the time. If we don't replay it, then dsl_destroy_head()
* (invoked from ztest_objset_destroy_cb()) should just throw it away.
*/
if (ztest_random(2) == 0 &&
ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE,
B_TRUE, FTAG, &os) == 0) {
ztest_zd_init(zdtmp, NULL, os);
zil_replay(os, zdtmp, ztest_replay_vector);
ztest_zd_fini(zdtmp);
dmu_objset_disown(os, B_TRUE, FTAG);
}
/*
* There may be an old instance of the dataset we're about to
* create lying around from a previous run. If so, destroy it
* and all of its snapshots.
*/
(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
/*
* Verify that the destroyed dataset is no longer in the namespace.
*/
VERIFY3U(ENOENT, ==, ztest_dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
B_TRUE, FTAG, &os));
/*
* Verify that we can create a new dataset.
*/
error = ztest_dataset_create(name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
- fatal(0, "dmu_objset_create(%s) = %d", name, error);
+ fatal(B_FALSE, "dmu_objset_create(%s) = %d", name, error);
}
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, B_TRUE,
FTAG, &os));
ztest_zd_init(zdtmp, NULL, os);
/*
* Open the intent log for it.
*/
zilog = zil_open(os, ztest_get_data);
/*
* Put some objects in there, do a little I/O to them,
* and randomly take a couple of snapshots along the way.
*/
iters = ztest_random(5);
for (i = 0; i < iters; i++) {
ztest_dmu_object_alloc_free(zdtmp, id);
if (ztest_random(iters) == 0)
(void) ztest_snapshot_create(name, i);
}
/*
* Verify that we cannot create an existing dataset.
*/
VERIFY3U(EEXIST, ==,
dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL, NULL));
/*
* Verify that we can hold an objset that is also owned.
*/
VERIFY0(dmu_objset_hold(name, FTAG, &os2));
dmu_objset_rele(os2, FTAG);
/*
* Verify that we cannot own an objset that is already owned.
*/
VERIFY3U(EBUSY, ==, ztest_dmu_objset_own(name, DMU_OST_OTHER,
B_FALSE, B_TRUE, FTAG, &os2));
zil_close(zilog);
dmu_objset_disown(os, B_TRUE, FTAG);
ztest_zd_fini(zdtmp);
out:
(void) pthread_rwlock_unlock(&ztest_name_lock);
umem_free(zdtmp, sizeof (ztest_ds_t));
}
/*
* Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
*/
void
ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
{
(void) pthread_rwlock_rdlock(&ztest_name_lock);
(void) ztest_snapshot_destroy(zd->zd_name, id);
(void) ztest_snapshot_create(zd->zd_name, id);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/*
* Clean up non-standard snapshots and clones.
*/
static void
ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
{
char *snap1name;
char *clone1name;
char *snap2name;
char *clone2name;
char *snap3name;
int error;
snap1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap3name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
- (void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN,
- "%s@s1_%llu", osname, (u_longlong_t)id);
- (void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN,
- "%s/c1_%llu", osname, (u_longlong_t)id);
- (void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN,
- "%s@s2_%llu", clone1name, (u_longlong_t)id);
- (void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN,
- "%s/c2_%llu", osname, (u_longlong_t)id);
- (void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN,
- "%s@s3_%llu", clone1name, (u_longlong_t)id);
+ (void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN, "%s@s1_%"PRIu64"",
+ osname, id);
+ (void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN, "%s/c1_%"PRIu64"",
+ osname, id);
+ (void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN, "%s@s2_%"PRIu64"",
+ clone1name, id);
+ (void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN, "%s/c2_%"PRIu64"",
+ osname, id);
+ (void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN, "%s@s3_%"PRIu64"",
+ clone1name, id);
error = dsl_destroy_head(clone2name);
if (error && error != ENOENT)
- fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error);
+ fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clone2name, error);
error = dsl_destroy_snapshot(snap3name, B_FALSE);
if (error && error != ENOENT)
- fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error);
+ fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d",
+ snap3name, error);
error = dsl_destroy_snapshot(snap2name, B_FALSE);
if (error && error != ENOENT)
- fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error);
+ fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d",
+ snap2name, error);
error = dsl_destroy_head(clone1name);
if (error && error != ENOENT)
- fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error);
+ fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clone1name, error);
error = dsl_destroy_snapshot(snap1name, B_FALSE);
if (error && error != ENOENT)
- fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error);
+ fatal(B_FALSE, "dsl_destroy_snapshot(%s) = %d",
+ snap1name, error);
umem_free(snap1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap3name, ZFS_MAX_DATASET_NAME_LEN);
}
/*
* Verify dsl_dataset_promote handles EBUSY
*/
void
ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
{
objset_t *os;
char *snap1name;
char *clone1name;
char *snap2name;
char *clone2name;
char *snap3name;
char *osname = zd->zd_name;
int error;
snap1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap3name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
(void) pthread_rwlock_rdlock(&ztest_name_lock);
ztest_dsl_dataset_cleanup(osname, id);
- (void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN,
- "%s@s1_%llu", osname, (u_longlong_t)id);
- (void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN,
- "%s/c1_%llu", osname, (u_longlong_t)id);
- (void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN,
- "%s@s2_%llu", clone1name, (u_longlong_t)id);
- (void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN,
- "%s/c2_%llu", osname, (u_longlong_t)id);
- (void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN,
- "%s@s3_%llu", clone1name, (u_longlong_t)id);
+ (void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN, "%s@s1_%"PRIu64"",
+ osname, id);
+ (void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN, "%s/c1_%"PRIu64"",
+ osname, id);
+ (void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN, "%s@s2_%"PRIu64"",
+ clone1name, id);
+ (void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN, "%s/c2_%"PRIu64"",
+ osname, id);
+ (void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN, "%s@s3_%"PRIu64"",
+ clone1name, id);
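/*
 * Illustrative sketch, not part of the original comments: the calls
 * below build roughly this topology (names abbreviated; the real ones
 * carry the osname prefix and the per-run id suffix):
 *
 *   osname@s1 -> clone1 (clone of s1) -> clone1@s2, clone1@s3
 *                                        clone2 (clone of clone1@s3)
 *
 * clone1@s2 is then owned via ztest_dmu_objset_own(), so promoting
 * clone2 (which would take over clone1's earlier snapshots, including
 * s2) is expected to fail with EBUSY rather than succeed.
 */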
error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
- fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error);
+ fatal(B_FALSE, "dmu_take_snapshot(%s) = %d", snap1name, error);
}
error = dmu_objset_clone(clone1name, snap1name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
- fatal(0, "dmu_objset_create(%s) = %d", clone1name, error);
+ fatal(B_FALSE, "dmu_objset_create(%s) = %d", clone1name, error);
}
error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
- fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error);
+ fatal(B_FALSE, "dmu_open_snapshot(%s) = %d", snap2name, error);
}
error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
- fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error);
+ fatal(B_FALSE, "dmu_open_snapshot(%s) = %d", snap3name, error);
}
error = dmu_objset_clone(clone2name, snap3name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
- fatal(0, "dmu_objset_create(%s) = %d", clone2name, error);
+ fatal(B_FALSE, "dmu_objset_create(%s) = %d", clone2name, error);
}
error = ztest_dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, B_TRUE,
FTAG, &os);
if (error)
- fatal(0, "dmu_objset_own(%s) = %d", snap2name, error);
+ fatal(B_FALSE, "dmu_objset_own(%s) = %d", snap2name, error);
error = dsl_dataset_promote(clone2name, NULL);
if (error == ENOSPC) {
dmu_objset_disown(os, B_TRUE, FTAG);
ztest_record_enospc(FTAG);
goto out;
}
if (error != EBUSY)
- fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
- error);
+ fatal(B_FALSE, "dsl_dataset_promote(%s), %d, not EBUSY",
+ clone2name, error);
dmu_objset_disown(os, B_TRUE, FTAG);
out:
ztest_dsl_dataset_cleanup(osname, id);
(void) pthread_rwlock_unlock(&ztest_name_lock);
umem_free(snap1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap3name, ZFS_MAX_DATASET_NAME_LEN);
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 4
/*
* Verify that dmu_object_{alloc,free} work as expected.
*/
void
ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
{
ztest_od_t *od;
int batchsize;
int size;
int b;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
batchsize = OD_ARRAY_SIZE;
for (b = 0; b < batchsize; b++)
ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER,
0, 0, 0);
/*
* Destroy the previous batch of objects, create a new batch,
* and do some I/O on the new objects.
*/
if (ztest_object_init(zd, od, size, B_TRUE) != 0)
return;
while (ztest_random(4 * batchsize) != 0)
ztest_io(zd, od[ztest_random(batchsize)].od_object,
ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
umem_free(od, size);
}
/*
* Rewind the global allocator to verify object allocation backfilling.
*/
void
ztest_dmu_object_next_chunk(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
uint64_t object;
/*
* Rewind the global allocator randomly back to a lower object number
* to force backfilling and reclamation of recently freed dnodes.
*/
mutex_enter(&os->os_obj_lock);
object = ztest_random(os->os_obj_next_chunk);
os->os_obj_next_chunk = P2ALIGN(object, dnodes_per_chunk);
mutex_exit(&os->os_obj_lock);
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 2
/*
* Verify that dmu_{read,write} work as expected.
*/
void
ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
{
int size;
ztest_od_t *od;
objset_t *os = zd->zd_os;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
dmu_tx_t *tx;
- int i, freeit, error;
- uint64_t n, s, txg;
+ int freeit, error;
+ uint64_t i, n, s, txg;
bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
uint64_t regions = 997;
uint64_t stride = 123456789ULL;
uint64_t width = 40;
int free_percent = 5;
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
* in sync and can be compared. Their contents relate to each other
* in a simple way: packobj is a dense array of 'bufwad' structures,
* while bigobj is a sparse array of the same bufwads. Specifically,
* for any index n, there are three bufwads that should be identical:
*
* packobj, at offset n * sizeof (bufwad_t)
* bigobj, at the head of the nth chunk
* bigobj, at the tail of the nth chunk
*
* The chunk size is arbitrary. It doesn't have to be a power of two,
* and it doesn't have any relation to the object blocksize.
* The only requirement is that it can hold at least two bufwads.
*
* Normally, we write the bufwad to each of these locations.
* However, free_percent of the time we instead write zeroes to
* packobj and perform a dmu_free_range() on bigobj. By comparing
* bigobj to packobj, we can verify that the DMU is correctly
* tracking which parts of an object are allocated and free,
* and that the contents of the allocated blocks are correct.
*/
/*
* Read the directory info. If it's the first time, set things up.
*/
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, chunksize);
ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0,
chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
umem_free(od, size);
return;
}
bigobj = od[0].od_object;
packobj = od[1].od_object;
chunksize = od[0].od_gen;
ASSERT3U(chunksize, ==, od[1].od_gen);
/*
* Prefetch a random chunk of the big object.
* Our aim here is to get some async reads in flight
* for blocks that we may free below; the DMU should
* handle this race correctly.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(2 * width - 1);
dmu_prefetch(os, bigobj, 0, n * chunksize, s * chunksize,
ZIO_PRIORITY_SYNC_READ);
/*
* Pick a random index and compute the offsets into packobj and bigobj.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(width - 1);
packoff = n * sizeof (bufwad_t);
packsize = s * sizeof (bufwad_t);
bigoff = n * chunksize;
bigsize = s * chunksize;
packbuf = umem_alloc(packsize, UMEM_NOFAIL);
bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
/*
* free_percent of the time, free a range of bigobj rather than
* overwriting it.
*/
freeit = (ztest_random(100) < free_percent);
/*
* Read the current contents of our objects.
*/
error = dmu_read(os, packobj, packoff, packsize, packbuf,
DMU_READ_PREFETCH);
ASSERT0(error);
error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
DMU_READ_PREFETCH);
ASSERT0(error);
/*
* Get a tx for the mods to both packobj and bigobj.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, packobj, packoff, packsize);
if (freeit)
dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
else
dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
/* This accounts for setting the checksum/compression. */
dmu_tx_hold_bonus(tx, bigobj);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(od, size);
return;
}
enum zio_checksum cksum;
do {
cksum = (enum zio_checksum)
ztest_random_dsl_prop(ZFS_PROP_CHECKSUM);
} while (cksum >= ZIO_CHECKSUM_LEGACY_FUNCTIONS);
dmu_object_set_checksum(os, bigobj, cksum, tx);
enum zio_compress comp;
do {
comp = (enum zio_compress)
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION);
} while (comp >= ZIO_COMPRESS_LEGACY_FUNCTIONS);
dmu_object_set_compress(os, bigobj, comp, tx);
/*
* For each index from n to n + s, verify that the existing bufwad
* in packobj matches the bufwads at the head and tail of the
* corresponding chunk in bigobj. Then update all three bufwads
* with the new values we want to write out.
*/
for (i = 0; i < s; i++) {
/* LINTED */
pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
/* LINTED */
bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
/* LINTED */
bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
ASSERT3U((uintptr_t)bigH - (uintptr_t)bigbuf, <, bigsize);
ASSERT3U((uintptr_t)bigT - (uintptr_t)bigbuf, <, bigsize);
if (pack->bw_txg > txg)
- fatal(0, "future leak: got %llx, open txg is %llx",
+ fatal(B_FALSE,
+ "future leak: got %"PRIx64", open txg is %"PRIx64"",
pack->bw_txg, txg);
if (pack->bw_data != 0 && pack->bw_index != n + i)
- fatal(0, "wrong index: got %llx, wanted %llx+%llx",
+ fatal(B_FALSE, "wrong index: "
+ "got %"PRIx64", wanted %"PRIx64"+%"PRIx64"",
pack->bw_index, n, i);
if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
- fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
+ fatal(B_FALSE, "pack/bigH mismatch in %p/%p",
+ pack, bigH);
if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
- fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
+ fatal(B_FALSE, "pack/bigT mismatch in %p/%p",
+ pack, bigT);
if (freeit) {
bzero(pack, sizeof (bufwad_t));
} else {
pack->bw_index = n + i;
pack->bw_txg = txg;
pack->bw_data = 1 + ztest_random(-2ULL);
}
*bigH = *pack;
*bigT = *pack;
}
/*
* We've verified all the old bufwads, and made new ones.
* Now write them out.
*/
dmu_write(os, packobj, packoff, packsize, packbuf, tx);
if (freeit) {
if (ztest_opts.zo_verbose >= 7) {
- (void) printf("freeing offset %llx size %llx"
- " txg %llx\n",
- (u_longlong_t)bigoff,
- (u_longlong_t)bigsize,
- (u_longlong_t)txg);
+ (void) printf("freeing offset %"PRIx64" size %"PRIx64""
+ " txg %"PRIx64"\n",
+ bigoff, bigsize, txg);
}
VERIFY0(dmu_free_range(os, bigobj, bigoff, bigsize, tx));
} else {
if (ztest_opts.zo_verbose >= 7) {
- (void) printf("writing offset %llx size %llx"
- " txg %llx\n",
- (u_longlong_t)bigoff,
- (u_longlong_t)bigsize,
- (u_longlong_t)txg);
+ (void) printf("writing offset %"PRIx64" size %"PRIx64""
+ " txg %"PRIx64"\n",
+ bigoff, bigsize, txg);
}
dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
}
dmu_tx_commit(tx);
/*
* Sanity check the stuff we just wrote.
*/
{
void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
VERIFY0(dmu_read(os, packobj, packoff,
packsize, packcheck, DMU_READ_PREFETCH));
VERIFY0(dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
ASSERT0(bcmp(packbuf, packcheck, packsize));
ASSERT0(bcmp(bigbuf, bigcheck, bigsize));
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
}
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(od, size);
}
static void
compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
{
uint64_t i;
bufwad_t *pack;
bufwad_t *bigH;
bufwad_t *bigT;
/*
* For each index from n to n + s, verify that the existing bufwad
* in packobj matches the bufwads at the head and tail of the
* corresponding chunk in bigobj. Then update all three bufwads
* with the new values we want to write out.
*/
for (i = 0; i < s; i++) {
/* LINTED */
pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
/* LINTED */
bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
/* LINTED */
bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
ASSERT3U((uintptr_t)bigH - (uintptr_t)bigbuf, <, bigsize);
ASSERT3U((uintptr_t)bigT - (uintptr_t)bigbuf, <, bigsize);
if (pack->bw_txg > txg)
- fatal(0, "future leak: got %llx, open txg is %llx",
+ fatal(B_FALSE,
+ "future leak: got %"PRIx64", open txg is %"PRIx64"",
pack->bw_txg, txg);
if (pack->bw_data != 0 && pack->bw_index != n + i)
- fatal(0, "wrong index: got %llx, wanted %llx+%llx",
+ fatal(B_FALSE, "wrong index: "
+ "got %"PRIx64", wanted %"PRIx64"+%"PRIx64"",
pack->bw_index, n, i);
if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
- fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
+ fatal(B_FALSE, "pack/bigH mismatch in %p/%p",
+ pack, bigH);
if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
- fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
+ fatal(B_FALSE, "pack/bigT mismatch in %p/%p",
+ pack, bigT);
pack->bw_index = n + i;
pack->bw_txg = txg;
pack->bw_data = 1 + ztest_random(-2ULL);
*bigH = *pack;
*bigT = *pack;
}
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 2
void
ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
dmu_tx_t *tx;
uint64_t i;
int error;
int size;
uint64_t n, s, txg;
bufwad_t *packbuf, *bigbuf;
uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
uint64_t blocksize = ztest_random_blocksize();
uint64_t chunksize = blocksize;
uint64_t regions = 997;
uint64_t stride = 123456789ULL;
uint64_t width = 9;
dmu_buf_t *bonus_db;
arc_buf_t **bigbuf_arcbufs;
dmu_object_info_t doi;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
* in sync and can be compared. Their contents relate to each other
* in a simple way: packobj is a dense array of 'bufwad' structures,
* while bigobj is a sparse array of the same bufwads. Specifically,
* for any index n, there are three bufwads that should be identical:
*
* packobj, at offset n * sizeof (bufwad_t)
* bigobj, at the head of the nth chunk
* bigobj, at the tail of the nth chunk
*
* The chunk size is set equal to bigobj block size so that
* dmu_assign_arcbuf_by_dbuf() can be tested for object updates.
*/
/*
* Read the directory info. If it's the first time, set things up.
*/
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0,
chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
umem_free(od, size);
return;
}
bigobj = od[0].od_object;
packobj = od[1].od_object;
blocksize = od[0].od_blocksize;
chunksize = blocksize;
ASSERT3U(chunksize, ==, od[1].od_gen);
VERIFY0(dmu_object_info(os, bigobj, &doi));
VERIFY(ISP2(doi.doi_data_block_size));
VERIFY3U(chunksize, ==, doi.doi_data_block_size);
VERIFY3U(chunksize, >=, 2 * sizeof (bufwad_t));
/*
* Pick a random index and compute the offsets into packobj and bigobj.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(width - 1);
packoff = n * sizeof (bufwad_t);
packsize = s * sizeof (bufwad_t);
bigoff = n * chunksize;
bigsize = s * chunksize;
packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
VERIFY0(dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
/*
* Iteration 0 tests zcopy for DB_UNCACHED dbufs.
* Iteration 1 tests zcopy to already referenced dbufs.
* Iteration 2 tests zcopy to a dirty dbuf in the same txg.
* Iteration 3 tests zcopy to a dbuf dirtied in a previous txg.
* Iteration 4 tests zcopy when the dbuf is no longer dirty.
* Iteration 5 tests zcopy when it can't be done.
* Iteration 6 performs one more zcopy write.
*/
for (i = 0; i < 7; i++) {
uint64_t j;
uint64_t off;
/*
* In iteration 5 (i == 5) use arcbufs
* that don't match bigobj blksz to test
* dmu_assign_arcbuf_by_dbuf() when it can't directly
* assign an arcbuf to a dbuf.
*/
for (j = 0; j < s; j++) {
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
bigbuf_arcbufs[j] =
dmu_request_arcbuf(bonus_db, chunksize);
} else {
bigbuf_arcbufs[2 * j] =
dmu_request_arcbuf(bonus_db, chunksize / 2);
bigbuf_arcbufs[2 * j + 1] =
dmu_request_arcbuf(bonus_db, chunksize / 2);
}
}
/*
* Get a tx for the mods to both packobj and bigobj.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, packobj, packoff, packsize);
dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
for (j = 0; j < s; j++) {
if (i != 5 ||
chunksize < (SPA_MINBLOCKSIZE * 2)) {
dmu_return_arcbuf(bigbuf_arcbufs[j]);
} else {
dmu_return_arcbuf(
bigbuf_arcbufs[2 * j]);
dmu_return_arcbuf(
bigbuf_arcbufs[2 * j + 1]);
}
}
umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
umem_free(od, size);
dmu_buf_rele(bonus_db, FTAG);
return;
}
/*
* 50% of the time don't read objects in the 1st iteration to
* test dmu_assign_arcbuf_by_dbuf() for the case when there are
* no existing dbufs for the specified offsets.
*/
if (i != 0 || ztest_random(2) != 0) {
error = dmu_read(os, packobj, packoff,
packsize, packbuf, DMU_READ_PREFETCH);
ASSERT0(error);
error = dmu_read(os, bigobj, bigoff, bigsize,
bigbuf, DMU_READ_PREFETCH);
ASSERT0(error);
}
compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
n, chunksize, txg);
/*
* We've verified all the old bufwads, and made new ones.
* Now write them out.
*/
dmu_write(os, packobj, packoff, packsize, packbuf, tx);
if (ztest_opts.zo_verbose >= 7) {
- (void) printf("writing offset %llx size %llx"
- " txg %llx\n",
- (u_longlong_t)bigoff,
- (u_longlong_t)bigsize,
- (u_longlong_t)txg);
+ (void) printf("writing offset %"PRIx64" size %"PRIx64""
+ " txg %"PRIx64"\n",
+ bigoff, bigsize, txg);
}
for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
dmu_buf_t *dbt;
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
bcopy((caddr_t)bigbuf + (off - bigoff),
bigbuf_arcbufs[j]->b_data, chunksize);
} else {
bcopy((caddr_t)bigbuf + (off - bigoff),
bigbuf_arcbufs[2 * j]->b_data,
chunksize / 2);
bcopy((caddr_t)bigbuf + (off - bigoff) +
chunksize / 2,
bigbuf_arcbufs[2 * j + 1]->b_data,
chunksize / 2);
}
if (i == 1) {
VERIFY(dmu_buf_hold(os, bigobj, off,
FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
}
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off, bigbuf_arcbufs[j], tx));
} else {
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off, bigbuf_arcbufs[2 * j], tx));
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off + chunksize / 2,
bigbuf_arcbufs[2 * j + 1], tx));
}
if (i == 1) {
dmu_buf_rele(dbt, FTAG);
}
}
dmu_tx_commit(tx);
/*
* Sanity check the stuff we just wrote.
*/
{
void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
VERIFY0(dmu_read(os, packobj, packoff,
packsize, packcheck, DMU_READ_PREFETCH));
VERIFY0(dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
ASSERT0(bcmp(packbuf, packcheck, packsize));
ASSERT0(bcmp(bigbuf, bigcheck, bigsize));
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
}
if (i == 2) {
txg_wait_open(dmu_objset_pool(os), 0, B_TRUE);
} else if (i == 3) {
txg_wait_synced(dmu_objset_pool(os), 0);
}
}
dmu_buf_rele(bonus_db, FTAG);
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
umem_free(od, size);
}
/* ARGSUSED */
void
ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
{
ztest_od_t *od;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
(ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
/*
* Have multiple threads write to large offsets in an object
* to verify that parallel writes to an object -- even to the
* same blocks within the object -- don't cause any trouble.
*/
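/*
 * Rough arithmetic, for illustration only (not part of the original
 * comment): ztest_random(20) + 43 gives a shift of 43..62, so the
 * base offset lands somewhere between roughly 2^43 (8 TiB) and
 * 2^62 (4 EiB); the second term merely adds a random multiple of
 * SPA_MAXBLOCKSIZE on top of that.
 */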
ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0)
return;
while (ztest_random(10) != 0)
ztest_io(zd, od->od_object, offset);
umem_free(od, sizeof (ztest_od_t));
}
void
ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
{
ztest_od_t *od;
uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
(ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
uint64_t count = ztest_random(20) + 1;
uint64_t blocksize = ztest_random_blocksize();
void *data;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
ztest_prealloc(zd, od->od_object, offset, count * blocksize);
data = umem_zalloc(blocksize, UMEM_NOFAIL);
while (ztest_random(count) != 0) {
uint64_t randoff = offset + (ztest_random(count) * blocksize);
if (ztest_write(zd, od->od_object, randoff, blocksize,
data) != 0)
break;
while (ztest_random(4) != 0)
ztest_io(zd, od->od_object, randoff);
}
umem_free(data, blocksize);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Verify that zap_{create,destroy,add,remove,update} work as expected.
*/
#define ZTEST_ZAP_MIN_INTS 1
#define ZTEST_ZAP_MAX_INTS 4
#define ZTEST_ZAP_MAX_PROPS 1000
void
ztest_zap(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t object;
uint64_t txg, last_txg;
uint64_t value[ZTEST_ZAP_MAX_INTS];
uint64_t zl_ints, zl_intsize, prop;
int i, ints;
dmu_tx_t *tx;
char propname[100], txgname[100];
int error;
char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
goto out;
object = od->od_object;
/*
* Generate a known hash collision, and verify that
* we can look up and remove both entries.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
for (i = 0; i < 2; i++) {
value[i] = i;
VERIFY0(zap_add(os, object, hc[i], sizeof (uint64_t),
1, &value[i], tx));
}
for (i = 0; i < 2; i++) {
VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
sizeof (uint64_t), 1, &value[i], tx));
VERIFY0(
zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, 1);
}
for (i = 0; i < 2; i++) {
VERIFY0(zap_remove(os, object, hc[i], tx));
}
dmu_tx_commit(tx);
/*
* Generate a bunch of random entries.
*/
ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
- (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
- (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
+ (void) sprintf(propname, "prop_%"PRIu64"", prop);
+ (void) sprintf(txgname, "txg_%"PRIu64"", prop);
bzero(value, sizeof (value));
last_txg = 0;
/*
* If these zap entries already exist, validate their contents.
*/
error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == 0) {
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, 1);
VERIFY0(zap_lookup(os, object, txgname, zl_intsize,
zl_ints, &last_txg));
VERIFY0(zap_length(os, object, propname, &zl_intsize,
&zl_ints));
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, ints);
VERIFY0(zap_lookup(os, object, propname, zl_intsize,
zl_ints, value));
for (i = 0; i < ints; i++) {
ASSERT3U(value[i], ==, last_txg + object + i);
}
} else {
ASSERT3U(error, ==, ENOENT);
}
/*
* Atomically update two entries in our zap object.
* The first is named txg_%llu, and contains the txg
* in which the property was last updated. The second
* is named prop_%llu, and the nth element of its value
* should be txg + object + n.
*/
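/*
 * Worked example with made-up numbers: if txg = 100, object = 5 and
 * ints = 3, the updates below store txg_<prop> = 100 and
 * prop_<prop> = { 105, 106, 107 }, which is exactly what the
 * validation loop above checks via last_txg + object + i.
 */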
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
if (last_txg > txg)
- fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
+ fatal(B_FALSE, "zap future leak: old %"PRIu64" new %"PRIu64"",
+ last_txg, txg);
for (i = 0; i < ints; i++)
value[i] = txg + object + i;
VERIFY0(zap_update(os, object, txgname, sizeof (uint64_t),
1, &txg, tx));
VERIFY0(zap_update(os, object, propname, sizeof (uint64_t),
ints, value, tx));
dmu_tx_commit(tx);
/*
* Remove a random pair of entries.
*/
prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
- (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
- (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
+ (void) sprintf(propname, "prop_%"PRIu64"", prop);
+ (void) sprintf(txgname, "txg_%"PRIu64"", prop);
error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == ENOENT)
goto out;
ASSERT0(error);
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
VERIFY0(zap_remove(os, object, txgname, tx));
VERIFY0(zap_remove(os, object, propname, tx));
dmu_tx_commit(tx);
out:
umem_free(od, sizeof (ztest_od_t));
}
/*
* Test case to test the upgrading of a microzap to fatzap.
*/
void
ztest_fzap(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
- uint64_t object, txg;
- int i;
+ uint64_t object, txg, value;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
goto out;
object = od->od_object;
/*
* Add entries to this ZAP and make sure it spills over
* and gets upgraded to a fatzap. Also, since we are adding
* 2050 entries we should see ptrtbl growth and leaf-block split.
*/
- for (i = 0; i < 2050; i++) {
+ for (value = 0; value < 2050; value++) {
char name[ZFS_MAX_DATASET_NAME_LEN];
- uint64_t value = i;
dmu_tx_t *tx;
int error;
- (void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
- (u_longlong_t)id, (u_longlong_t)value);
+ (void) snprintf(name, sizeof (name), "fzap-%"PRIu64"-%"PRIu64"",
+ id, value);
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, name);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
error = zap_add(os, object, name, sizeof (uint64_t), 1,
&value, tx);
ASSERT(error == 0 || error == EEXIST);
dmu_tx_commit(tx);
}
out:
umem_free(od, sizeof (ztest_od_t));
}
/* ARGSUSED */
void
ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
dmu_tx_t *tx;
int i, namelen, error;
int micro = ztest_random(2);
char name[20], string_value[20];
void *data;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
object = od->od_object;
/*
* Generate a random name of the form 'xxx.....' where each
* x is a random printable character and the dots are dots.
* There are 94 such characters, and the name length goes from
* 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
*/
namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
for (i = 0; i < 3; i++)
name[i] = '!' + ztest_random('~' - '!' + 1);
for (; i < namelen - 1; i++)
name[i] = '.';
name[i] = '\0';
if ((namelen & 1) || micro) {
wsize = sizeof (txg);
wc = 1;
data = &txg;
} else {
wsize = 1;
wc = namelen;
data = string_value;
}
count = -1ULL;
VERIFY0(zap_count(os, object, &count));
ASSERT3S(count, !=, -1ULL);
/*
* Select an operation: length, lookup, add, update, remove.
*/
i = ztest_random(5);
if (i >= 2) {
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
bcopy(name, string_value, namelen);
} else {
tx = NULL;
txg = 0;
bzero(string_value, namelen);
}
switch (i) {
case 0:
error = zap_length(os, object, name, &zl_wsize, &zl_wc);
if (error == 0) {
ASSERT3U(wsize, ==, zl_wsize);
ASSERT3U(wc, ==, zl_wc);
} else {
ASSERT3U(error, ==, ENOENT);
}
break;
case 1:
error = zap_lookup(os, object, name, wsize, wc, data);
if (error == 0) {
if (data == string_value &&
bcmp(name, data, namelen) != 0)
- fatal(0, "name '%s' != val '%s' len %d",
- name, data, namelen);
+ fatal(B_FALSE, "name '%s' != val '%s' len %d",
+ name, (char *)data, namelen);
} else {
ASSERT3U(error, ==, ENOENT);
}
break;
case 2:
error = zap_add(os, object, name, wsize, wc, data, tx);
ASSERT(error == 0 || error == EEXIST);
break;
case 3:
VERIFY0(zap_update(os, object, name, wsize, wc, data, tx));
break;
case 4:
error = zap_remove(os, object, name, tx);
ASSERT(error == 0 || error == ENOENT);
break;
}
if (tx != NULL)
dmu_tx_commit(tx);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Commit callback data.
*/
typedef struct ztest_cb_data {
list_node_t zcd_node;
uint64_t zcd_txg;
int zcd_expected_err;
boolean_t zcd_added;
boolean_t zcd_called;
spa_t *zcd_spa;
} ztest_cb_data_t;
/* This is the actual commit callback function */
static void
ztest_commit_callback(void *arg, int error)
{
ztest_cb_data_t *data = arg;
uint64_t synced_txg;
VERIFY3P(data, !=, NULL);
VERIFY3S(data->zcd_expected_err, ==, error);
VERIFY(!data->zcd_called);
synced_txg = spa_last_synced_txg(data->zcd_spa);
if (data->zcd_txg > synced_txg)
- fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
- ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
- synced_txg);
+ fatal(B_FALSE,
+ "commit callback of txg %"PRIu64" called prematurely, "
+ "last synced txg = %"PRIu64"\n",
+ data->zcd_txg, synced_txg);
data->zcd_called = B_TRUE;
if (error == ECANCELED) {
ASSERT0(data->zcd_txg);
ASSERT(!data->zcd_added);
/*
* The private callback data should be destroyed here, but
* since we are going to check the zcd_called field after
* dmu_tx_abort(), we will destroy it there.
*/
return;
}
ASSERT(data->zcd_added);
ASSERT3U(data->zcd_txg, !=, 0);
(void) mutex_enter(&zcl.zcl_callbacks_lock);
/* See if this cb was called with a smaller txg delay than any before */
if ((synced_txg - data->zcd_txg) < zc_min_txg_delay)
zc_min_txg_delay = synced_txg - data->zcd_txg;
/* Remove our callback from the list */
list_remove(&zcl.zcl_callbacks, data);
(void) mutex_exit(&zcl.zcl_callbacks_lock);
umem_free(data, sizeof (ztest_cb_data_t));
}
/* Allocate and initialize callback data structure */
static ztest_cb_data_t *
ztest_create_cb_data(objset_t *os, uint64_t txg)
{
ztest_cb_data_t *cb_data;
cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
cb_data->zcd_txg = txg;
cb_data->zcd_spa = dmu_objset_spa(os);
list_link_init(&cb_data->zcd_node);
return (cb_data);
}
/*
* Commit callback test.
*/
void
ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
dmu_tx_t *tx;
ztest_cb_data_t *cb_data[3], *tmp_cb;
uint64_t old_txg, txg;
int i, error = 0;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
tx = dmu_tx_create(os);
cb_data[0] = ztest_create_cb_data(os, 0);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t));
/* Every once in a while, abort the transaction on purpose */
if (ztest_random(100) == 0)
error = -1;
if (!error)
error = dmu_tx_assign(tx, TXG_NOWAIT);
txg = error ? 0 : dmu_tx_get_txg(tx);
cb_data[0]->zcd_txg = txg;
cb_data[1] = ztest_create_cb_data(os, txg);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
if (error) {
/*
* It's not a strict requirement to call the registered
* callbacks from inside dmu_tx_abort(), but that's how it's
* supposed to happen in the current implementation, so we will
* check for that.
*/
for (i = 0; i < 2; i++) {
cb_data[i]->zcd_expected_err = ECANCELED;
VERIFY(!cb_data[i]->zcd_called);
}
dmu_tx_abort(tx);
for (i = 0; i < 2; i++) {
VERIFY(cb_data[i]->zcd_called);
umem_free(cb_data[i], sizeof (ztest_cb_data_t));
}
umem_free(od, sizeof (ztest_od_t));
return;
}
cb_data[2] = ztest_create_cb_data(os, txg);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
/*
* Read existing data to make sure there isn't a future leak.
*/
VERIFY0(dmu_read(os, od->od_object, 0, sizeof (uint64_t),
&old_txg, DMU_READ_PREFETCH));
if (old_txg > txg)
- fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
+ fatal(B_FALSE,
+ "future leak: got %"PRIu64", open txg is %"PRIu64"",
old_txg, txg);
dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx);
(void) mutex_enter(&zcl.zcl_callbacks_lock);
/*
* Since commit callbacks don't have any ordering requirement and since
* it is theoretically possible for a commit callback to be called
* after an arbitrary amount of time has elapsed since its txg has been
* synced, it is difficult to reliably determine whether a commit
* callback hasn't been called due to high load or due to a flawed
* implementation.
*
* In practice, we will assume that if after a certain number of txgs a
* commit callback hasn't been called, then most likely there's an
* implementation bug.
*/
tmp_cb = list_head(&zcl.zcl_callbacks);
if (tmp_cb != NULL &&
tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) {
- fatal(0, "Commit callback threshold exceeded, oldest txg: %"
- PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
+ fatal(B_FALSE,
+ "Commit callback threshold exceeded, "
+ "oldest txg: %"PRIu64", open txg: %"PRIu64"\n",
+ tmp_cb->zcd_txg, txg);
}
/*
* Let's find the place to insert our callbacks.
*
* Even though the list is ordered by txg, it is possible for the
* insertion point to not be the end because our txg may already be
* quiescing at this point and other callbacks in the open txg
* (from other objsets) may have sneaked in.
*/
tmp_cb = list_tail(&zcl.zcl_callbacks);
while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
/* Add the 3 callbacks to the list */
for (i = 0; i < 3; i++) {
if (tmp_cb == NULL)
list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
else
list_insert_after(&zcl.zcl_callbacks, tmp_cb,
cb_data[i]);
cb_data[i]->zcd_added = B_TRUE;
VERIFY(!cb_data[i]->zcd_called);
tmp_cb = cb_data[i];
}
zc_cb_counter += 3;
(void) mutex_exit(&zcl.zcl_callbacks_lock);
dmu_tx_commit(tx);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Visit each object in the dataset. Verify that its properties
* are consistent with what was stored in the block tag when it was created,
* and that its unused bonus buffer space has not been overwritten.
*/
/* ARGSUSED */
void
ztest_verify_dnode_bt(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
uint64_t obj;
int err = 0;
for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
ztest_block_tag_t *bt = NULL;
dmu_object_info_t doi;
dmu_buf_t *db;
ztest_object_lock(zd, obj, RL_READER);
if (dmu_bonus_hold(os, obj, FTAG, &db) != 0) {
ztest_object_unlock(zd, obj);
continue;
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_size >= sizeof (*bt))
bt = ztest_bt_bonus(db);
if (bt && bt->bt_magic == BT_MAGIC) {
ztest_bt_verify(bt, os, obj, doi.doi_dnodesize,
bt->bt_offset, bt->bt_gen, bt->bt_txg,
bt->bt_crtxg);
ztest_verify_unused_bonus(db, bt, obj, os, bt->bt_gen);
}
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, obj);
}
}
/* ARGSUSED */
void
ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
zfs_prop_t proplist[] = {
ZFS_PROP_CHECKSUM,
ZFS_PROP_COMPRESSION,
ZFS_PROP_COPIES,
ZFS_PROP_DEDUP
};
int p;
(void) pthread_rwlock_rdlock(&ztest_name_lock);
for (p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
(void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
VERIFY0(ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_RECORDSIZE,
ztest_random_blocksize(), (int)ztest_random(2)));
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/* ARGSUSED */
void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
nvlist_t *props = NULL;
(void) pthread_rwlock_rdlock(&ztest_name_lock);
(void) ztest_spa_prop_set_uint64(ZPOOL_PROP_AUTOTRIM, ztest_random(2));
VERIFY0(spa_prop_get(ztest_spa, &props));
if (ztest_opts.zo_verbose >= 6)
dump_nvlist(props, 4);
fnvlist_free(props);
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
static int
user_release_one(const char *snapname, const char *holdname)
{
nvlist_t *snaps, *holds;
int error;
snaps = fnvlist_alloc();
holds = fnvlist_alloc();
fnvlist_add_boolean(holds, holdname);
fnvlist_add_nvlist(snaps, snapname, holds);
fnvlist_free(holds);
error = dsl_dataset_user_release(snaps, NULL);
fnvlist_free(snaps);
return (error);
}
/*
* Test snapshot hold/release and deferred destroy.
*/
void
ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
{
int error;
objset_t *os = zd->zd_os;
objset_t *origin;
char snapname[100];
char fullname[100];
char clonename[100];
char tag[100];
char osname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *holds;
(void) pthread_rwlock_rdlock(&ztest_name_lock);
dmu_objset_name(os, osname);
- (void) snprintf(snapname, sizeof (snapname), "sh1_%llu",
- (u_longlong_t)id);
+ (void) snprintf(snapname, sizeof (snapname), "sh1_%"PRIu64"", id);
(void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
- (void) snprintf(clonename, sizeof (clonename),
- "%s/ch1_%llu", osname, (u_longlong_t)id);
- (void) snprintf(tag, sizeof (tag), "tag_%llu", (u_longlong_t)id);
+ (void) snprintf(clonename, sizeof (clonename), "%s/ch1_%"PRIu64"",
+ osname, id);
+ (void) snprintf(tag, sizeof (tag), "tag_%"PRIu64"", id);
/*
* Clean up from any previous run.
*/
error = dsl_destroy_head(clonename);
if (error != ENOENT)
ASSERT0(error);
error = user_release_one(fullname, tag);
if (error != ESRCH && error != ENOENT)
ASSERT0(error);
error = dsl_destroy_snapshot(fullname, B_FALSE);
if (error != ENOENT)
ASSERT0(error);
/*
* Create snapshot, clone it, mark snap for deferred destroy,
* destroy clone, verify snap was also destroyed.
*/
error = dmu_objset_snapshot_one(osname, snapname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
- fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
+ fatal(B_FALSE, "dmu_objset_snapshot(%s) = %d", fullname, error);
}
error = dmu_objset_clone(clonename, fullname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_clone");
goto out;
}
- fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
+ fatal(B_FALSE, "dmu_objset_clone(%s) = %d", clonename, error);
}
error = dsl_destroy_snapshot(fullname, B_TRUE);
if (error) {
- fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
+ fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
fullname, error);
}
error = dsl_destroy_head(clonename);
if (error)
- fatal(0, "dsl_destroy_head(%s) = %d", clonename, error);
+ fatal(B_FALSE, "dsl_destroy_head(%s) = %d", clonename, error);
error = dmu_objset_hold(fullname, FTAG, &origin);
if (error != ENOENT)
- fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
+ fatal(B_FALSE, "dmu_objset_hold(%s) = %d", fullname, error);
/*
* Create snapshot, add temporary hold, verify that we can't
* destroy a held snapshot, mark for deferred destroy,
* release hold, verify snapshot was destroyed.
*/
error = dmu_objset_snapshot_one(osname, snapname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
- fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
+ fatal(B_FALSE, "dmu_objset_snapshot(%s) = %d", fullname, error);
}
holds = fnvlist_alloc();
fnvlist_add_string(holds, fullname, tag);
error = dsl_dataset_user_hold(holds, 0, NULL);
fnvlist_free(holds);
if (error == ENOSPC) {
ztest_record_enospc("dsl_dataset_user_hold");
goto out;
} else if (error) {
- fatal(0, "dsl_dataset_user_hold(%s, %s) = %u",
+ fatal(B_FALSE, "dsl_dataset_user_hold(%s, %s) = %u",
fullname, tag, error);
}
error = dsl_destroy_snapshot(fullname, B_FALSE);
if (error != EBUSY) {
- fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d",
+ fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_FALSE) = %d",
fullname, error);
}
error = dsl_destroy_snapshot(fullname, B_TRUE);
if (error) {
- fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
+ fatal(B_FALSE, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
fullname, error);
}
error = user_release_one(fullname, tag);
if (error)
- fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);
+ fatal(B_FALSE, "user_release_one(%s, %s) = %d",
+ fullname, tag, error);
VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
out:
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
/*
* Inject random faults into the on-disk data.
*/
/* ARGSUSED */
void
ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
int fd;
uint64_t offset;
uint64_t leaves;
uint64_t bad = 0x1990c0ffeedecadeull;
uint64_t top, leaf;
char *path0;
char *pathrand;
size_t fsize;
int bshift = SPA_MAXBLOCKSHIFT + 2;
int iters = 1000;
int maxfaults;
int mirror_save;
vdev_t *vd0 = NULL;
uint64_t guid0 = 0;
boolean_t islog = B_FALSE;
path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
mutex_enter(&ztest_vdev_lock);
/*
* While device removal is in progress, fault injection must be
* disabled until it completes and the pool is scrubbed. The fault
* injection strategy for damaging blocks does not take into account
* evacuated blocks which may have already been damaged.
*/
if (ztest_device_removal_active) {
mutex_exit(&ztest_vdev_lock);
goto out;
}
maxfaults = MAXFAULTS(zs);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raid_children;
mirror_save = zs->zs_mirrors;
mutex_exit(&ztest_vdev_lock);
ASSERT3U(leaves, >=, 1);
/*
* While ztest is running the number of leaves will not change. This
* is critical for the fault injection logic as it determines where
* errors can be safely injected such that they are always repairable.
*
* When restarting ztest a different number of leaves may be requested
* which will shift the regions to be damaged. This is fine as long
* as the pool has been scrubbed prior to using the new mapping.
* Failure to do so can result in non-repairable damage being injected.
*/
if (ztest_pool_scrubbed == B_FALSE)
goto out;
/*
* Grab the name lock as reader. There are some operations
* which don't like to have their vdevs changed while
* they are in progress (i.e. spa_change_guid). Those
* operations will have grabbed the name lock as writer.
*/
(void) pthread_rwlock_rdlock(&ztest_name_lock);
/*
* We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (ztest_random(2) == 0) {
/*
* Inject errors on a normal data device or slog device.
*/
top = ztest_random_vdev_top(spa, B_TRUE);
leaf = ztest_random(leaves) + zs->zs_splits;
/*
* Generate paths to the first leaf in this top-level vdev,
* and to the random leaf we selected. We'll induce transient
* write failures and random online/offline activity on leaf 0,
* and we'll write random garbage to the randomly chosen leaf.
*/
(void) snprintf(path0, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + zs->zs_splits);
(void) snprintf(pathrand, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + leaf);
vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
if (vd0 != NULL && vd0->vdev_top->vdev_islog)
islog = B_TRUE;
/*
* If the top-level vdev needs to be resilvered
* then we only allow faults on the device that is
* resilvering.
*/
if (vd0 != NULL && maxfaults != 1 &&
(!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) ||
vd0->vdev_resilver_txg != 0)) {
/*
* Make vd0 explicitly claim to be unreadable,
* or unwritable, or reach behind its back
* and close the underlying fd. We can do this if
* maxfaults == 0 because we'll fail and reexecute,
* and we can do it if maxfaults >= 2 because we'll
* have enough redundancy. If maxfaults == 1, the
* combination of this with injection of random data
* corruption below exceeds the pool's fault tolerance.
*/
vdev_file_t *vf = vd0->vdev_tsd;
zfs_dbgmsg("injecting fault to vdev %llu; maxfaults=%d",
(long long)vd0->vdev_id, (int)maxfaults);
if (vf != NULL && ztest_random(3) == 0) {
(void) close(vf->vf_file->f_fd);
vf->vf_file->f_fd = -1;
} else if (ztest_random(2) == 0) {
vd0->vdev_cant_read = B_TRUE;
} else {
vd0->vdev_cant_write = B_TRUE;
}
guid0 = vd0->vdev_guid;
}
} else {
/*
* Inject errors on an l2cache device.
*/
spa_aux_vdev_t *sav = &spa->spa_l2cache;
if (sav->sav_count == 0) {
spa_config_exit(spa, SCL_STATE, FTAG);
(void) pthread_rwlock_unlock(&ztest_name_lock);
goto out;
}
vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
guid0 = vd0->vdev_guid;
(void) strcpy(path0, vd0->vdev_path);
(void) strcpy(pathrand, vd0->vdev_path);
leaf = 0;
leaves = 1;
maxfaults = INT_MAX; /* no limit on cache devices */
}
spa_config_exit(spa, SCL_STATE, FTAG);
(void) pthread_rwlock_unlock(&ztest_name_lock);
/*
* If we can tolerate two or more faults, or we're dealing
* with a slog, randomly online/offline vd0.
*/
if ((maxfaults >= 2 || islog) && guid0 != 0) {
if (ztest_random(10) < 6) {
int flags = (ztest_random(2) == 0 ?
ZFS_OFFLINE_TEMPORARY : 0);
/*
* We have to grab the zs_name_lock as writer to
* prevent a race between offlining a slog and
* destroying a dataset. Offlining the slog will
* grab a reference on the dataset which may cause
* dsl_destroy_head() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
if (islog)
(void) pthread_rwlock_wrlock(&ztest_name_lock);
VERIFY3U(vdev_offline(spa, guid0, flags), !=, EBUSY);
if (islog)
(void) pthread_rwlock_unlock(&ztest_name_lock);
} else {
/*
* Ideally we would like to be able to randomly
* call vdev_[on|off]line without holding locks
* to force unpredictable failures but the side
* effects of vdev_[on|off]line prevent us from
* doing so. We grab the ztest_vdev_lock here to
* prevent a race between injection testing and
* aux_vdev removal.
*/
mutex_enter(&ztest_vdev_lock);
(void) vdev_online(spa, guid0, 0, NULL);
mutex_exit(&ztest_vdev_lock);
}
}
if (maxfaults == 0)
goto out;
/*
* We have at least single-fault tolerance, so inject data corruption.
*/
fd = open(pathrand, O_RDWR);
if (fd == -1) /* we hit a gap in the device namespace */
goto out;
fsize = lseek(fd, 0, SEEK_END);
while (--iters != 0) {
/*
* The offset must be chosen carefully to ensure that
* we do not inject a given logical block with errors
* on two different leaf devices, because ZFS can not
* tolerate that (if maxfaults==1).
*
* To achieve this we divide each leaf device into
* chunks of size (# leaves * SPA_MAXBLOCKSIZE * 4).
* Each chunk is further divided into error-injection
* ranges (can accept errors) and clear ranges (we do
* not inject errors in those). Each error-injection
* range can accept errors only for a single leaf vdev.
* Error-injection ranges are separated by clear ranges.
*
* For example, with 3 leaves, each chunk looks like:
* 0 to 32M: injection range for leaf 0
* 32M to 64M: clear range - no injection allowed
* 64M to 96M: injection range for leaf 1
* 96M to 128M: clear range - no injection allowed
* 128M to 160M: injection range for leaf 2
* 160M to 192M: clear range - no injection allowed
*
* Each clear range must be large enough such that a
* single block cannot straddle it. This way a block
* can't be a target in two different injection ranges
* (on different leaf vdevs).
*/
offset = ztest_random(fsize / (leaves << bshift)) *
(leaves << bshift) + (leaf << bshift) +
(ztest_random(1ULL << (bshift - 1)) & -8ULL);
/*
* Only allow damage to the labels at one end of the vdev.
*
* If all labels are damaged, the device will be totally
* inaccessible, which will result in loss of data,
* because we also damage (parts of) the other side of
* the mirror/raidz.
*
* Additionally, we will always have both an even and an
* odd label, so that we can handle crashes in the
* middle of vdev_config_sync().
*/
if ((leaf & 1) == 0 && offset < VDEV_LABEL_START_SIZE)
continue;
/*
* The two end labels are stored at the "end" of the disk, but
* the end of the disk (vdev_psize) is aligned to
* sizeof (vdev_label_t).
*/
uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t));
if ((leaf & 1) == 1 &&
offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE)
continue;
mutex_enter(&ztest_vdev_lock);
if (mirror_save != zs->zs_mirrors) {
mutex_exit(&ztest_vdev_lock);
(void) close(fd);
goto out;
}
if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
- fatal(1, "can't inject bad word at 0x%llx in %s",
+ fatal(B_TRUE,
+ "can't inject bad word at 0x%"PRIx64" in %s",
offset, pathrand);
mutex_exit(&ztest_vdev_lock);
if (ztest_opts.zo_verbose >= 7)
(void) printf("injected bad word into %s,"
- " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
+ " offset 0x%"PRIx64"\n", pathrand, offset);
}
(void) close(fd);
out:
umem_free(path0, MAXPATHLEN);
umem_free(pathrand, MAXPATHLEN);
}
/*
* By design ztest will never inject uncorrectable damage into the pool.
* Issue a scrub, wait for it to complete, and verify there is never any
* persistent damage.
*
* Only after a full scrub has been completed is it safe to start injecting
* data corruption. See the comment in ztest_fault_inject().
*/
static int
ztest_scrub_impl(spa_t *spa)
{
int error = spa_scan(spa, POOL_SCAN_SCRUB);
if (error)
return (error);
while (dsl_scan_scrubbing(spa_get_dsl(spa)))
txg_wait_synced(spa_get_dsl(spa), 0);
if (spa_get_errlog_size(spa) > 0)
return (ECKSUM);
ztest_pool_scrubbed = B_TRUE;
return (0);
}
/*
* Scrub the pool.
*/
/* ARGSUSED */
void
ztest_scrub(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
int error;
/*
* Scrub in progress by device removal.
*/
if (ztest_device_removal_active)
return;
/*
* Start a scrub, wait a moment, then force a restart.
*/
(void) spa_scan(spa, POOL_SCAN_SCRUB);
(void) poll(NULL, 0, 100);
error = ztest_scrub_impl(spa);
if (error == EBUSY)
error = 0;
ASSERT0(error);
}
/*
* Change the guid for the pool.
*/
/* ARGSUSED */
void
ztest_reguid(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
uint64_t orig, load;
int error;
if (ztest_opts.zo_mmp_test)
return;
orig = spa_guid(spa);
load = spa_load_guid(spa);
(void) pthread_rwlock_wrlock(&ztest_name_lock);
error = spa_change_guid(spa);
(void) pthread_rwlock_unlock(&ztest_name_lock);
if (error != 0)
return;
if (ztest_opts.zo_verbose >= 4) {
- (void) printf("Changed guid old %llu -> %llu\n",
- (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
+ (void) printf("Changed guid old %"PRIu64" -> %"PRIu64"\n",
+ orig, spa_guid(spa));
}
VERIFY3U(orig, !=, spa_guid(spa));
VERIFY3U(load, ==, spa_load_guid(spa));
}
void
ztest_fletcher(ztest_ds_t *zd, uint64_t id)
{
hrtime_t end = gethrtime() + NANOSEC;
while (gethrtime() <= end) {
int run_count = 100;
void *buf;
struct abd *abd_data, *abd_meta;
uint32_t size;
int *ptr;
int i;
zio_cksum_t zc_ref;
zio_cksum_t zc_ref_byteswap;
size = ztest_random_blocksize();
buf = umem_alloc(size, UMEM_NOFAIL);
abd_data = abd_alloc(size, B_FALSE);
abd_meta = abd_alloc(size, B_TRUE);
for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
*ptr = ztest_random(UINT_MAX);
abd_copy_from_buf_off(abd_data, buf, 0, size);
abd_copy_from_buf_off(abd_meta, buf, 0, size);
VERIFY0(fletcher_4_impl_set("scalar"));
fletcher_4_native(buf, size, NULL, &zc_ref);
fletcher_4_byteswap(buf, size, NULL, &zc_ref_byteswap);
VERIFY0(fletcher_4_impl_set("cycle"));
while (run_count-- > 0) {
zio_cksum_t zc;
zio_cksum_t zc_byteswap;
fletcher_4_byteswap(buf, size, NULL, &zc_byteswap);
fletcher_4_native(buf, size, NULL, &zc);
VERIFY0(bcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(bcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
/* Test ABD - data */
abd_fletcher_4_byteswap(abd_data, size, NULL,
&zc_byteswap);
abd_fletcher_4_native(abd_data, size, NULL, &zc);
VERIFY0(bcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(bcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
/* Test ABD - metadata */
abd_fletcher_4_byteswap(abd_meta, size, NULL,
&zc_byteswap);
abd_fletcher_4_native(abd_meta, size, NULL, &zc);
VERIFY0(bcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(bcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
}
umem_free(buf, size);
abd_free(abd_data);
abd_free(abd_meta);
}
}
void
ztest_fletcher_incr(ztest_ds_t *zd, uint64_t id)
{
void *buf;
size_t size;
int *ptr;
int i;
zio_cksum_t zc_ref;
zio_cksum_t zc_ref_bswap;
hrtime_t end = gethrtime() + NANOSEC;
while (gethrtime() <= end) {
int run_count = 100;
size = ztest_random_blocksize();
buf = umem_alloc(size, UMEM_NOFAIL);
for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
*ptr = ztest_random(UINT_MAX);
VERIFY0(fletcher_4_impl_set("scalar"));
fletcher_4_native(buf, size, NULL, &zc_ref);
fletcher_4_byteswap(buf, size, NULL, &zc_ref_bswap);
VERIFY0(fletcher_4_impl_set("cycle"));
while (run_count-- > 0) {
zio_cksum_t zc;
zio_cksum_t zc_bswap;
size_t pos = 0;
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
ZIO_SET_CHECKSUM(&zc_bswap, 0, 0, 0, 0);
while (pos < size) {
size_t inc = 64 * ztest_random(size / 67);
/* sometimes add a few bytes to test the non-SIMD path */
if (ztest_random(100) < 10)
inc += P2ALIGN(ztest_random(64),
sizeof (uint32_t));
if (inc > (size - pos))
inc = size - pos;
fletcher_4_incremental_native(buf + pos, inc,
&zc);
fletcher_4_incremental_byteswap(buf + pos, inc,
&zc_bswap);
pos += inc;
}
VERIFY3U(pos, ==, size);
VERIFY(ZIO_CHECKSUM_EQUAL(zc, zc_ref));
VERIFY(ZIO_CHECKSUM_EQUAL(zc_bswap, zc_ref_bswap));
/*
* Verify that an incremental checksum over the whole buffer is
* equivalent to the non-incremental version.
*/
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
ZIO_SET_CHECKSUM(&zc_bswap, 0, 0, 0, 0);
fletcher_4_incremental_native(buf, size, &zc);
fletcher_4_incremental_byteswap(buf, size, &zc_bswap);
VERIFY(ZIO_CHECKSUM_EQUAL(zc, zc_ref));
VERIFY(ZIO_CHECKSUM_EQUAL(zc_bswap, zc_ref_bswap));
}
umem_free(buf, size);
}
}
static int
ztest_set_global_vars(void)
{
for (size_t i = 0; i < ztest_opts.zo_gvars_count; i++) {
char *kv = ztest_opts.zo_gvars[i];
VERIFY3U(strlen(kv), <=, ZO_GVARS_MAX_ARGLEN);
VERIFY3U(strlen(kv), >, 0);
int err = set_global_var(kv);
if (ztest_opts.zo_verbose > 0) {
(void) printf("setting global var %s ... %s\n", kv,
err ? "failed" : "ok");
}
if (err != 0) {
(void) fprintf(stderr,
"failed to set global var '%s'\n", kv);
return (err);
}
}
return (0);
}
static char **
ztest_global_vars_to_zdb_args(void)
{
char **args = calloc(2*ztest_opts.zo_gvars_count + 1, sizeof (char *));
char **cur = args;
for (size_t i = 0; i < ztest_opts.zo_gvars_count; i++) {
char *kv = ztest_opts.zo_gvars[i];
*cur = "-o";
cur++;
*cur = strdup(kv);
cur++;
}
ASSERT3P(cur, ==, &args[2*ztest_opts.zo_gvars_count]);
*cur = NULL;
return (args);
}
/* The end of strings is indicated by a NULL element */
static char *
join_strings(char **strings, const char *sep)
{
size_t totallen = 0;
for (char **sp = strings; *sp != NULL; sp++) {
totallen += strlen(*sp);
totallen += strlen(sep);
}
if (totallen > 0) {
ASSERT(totallen >= strlen(sep));
totallen -= strlen(sep);
}
size_t buflen = totallen + 1;
char *o = malloc(buflen); /* trailing 0 byte */
o[0] = '\0';
for (char **sp = strings; *sp != NULL; sp++) {
size_t would;
would = strlcat(o, *sp, buflen);
VERIFY3U(would, <, buflen);
if (*(sp+1) == NULL) {
break;
}
would = strlcat(o, sep, buflen);
VERIFY3U(would, <, buflen);
}
ASSERT3S(strlen(o), ==, totallen);
return (o);
}
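/*
 * Hypothetical usage sketch (no caller above does exactly this): given
 * the NULL-terminated array built by ztest_global_vars_to_zdb_args(),
 * e.g. { "-o", "zfs_flags=1", "-o", "spa_asize_inflation=3", NULL },
 * join_strings(args, " ") returns the malloc'd string
 * "-o zfs_flags=1 -o spa_asize_inflation=3", which the caller frees.
 */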
static int
ztest_check_path(char *path)
{
struct stat s;
/* return true on success */
return (!stat(path, &s));
}
static void
ztest_get_zdb_bin(char *bin, int len)
{
char *zdb_path;
/*
* Try ZDB_PATH and the in-tree zdb path. If neither works, just
* let popen(3) search PATH.
*/
if ((zdb_path = getenv("ZDB_PATH"))) {
strlcpy(bin, zdb_path, len); /* In env */
if (!ztest_check_path(bin)) {
ztest_dump_core = 0;
- fatal(1, "invalid ZDB_PATH '%s'", bin);
+ fatal(B_TRUE, "invalid ZDB_PATH '%s'", bin);
}
return;
}
VERIFY3P(realpath(getexecname(), bin), !=, NULL);
if (strstr(bin, "/ztest/")) {
strstr(bin, "/ztest/")[0] = '\0'; /* In-tree */
strcat(bin, "/zdb/zdb");
if (ztest_check_path(bin))
return;
}
strcpy(bin, "zdb");
}
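A worked example of the in-tree fallback above (the build path is hypothetical):

/*
 * Sketch only:
 *   realpath(getexecname())  ->  /build/openzfs/cmd/ztest/.libs/ztest
 *   truncate at "/ztest/"    ->  /build/openzfs/cmd
 *   strcat("/zdb/zdb")       ->  /build/openzfs/cmd/zdb/zdb
 * If that in-tree path does not exist, the plain "zdb" found on PATH
 * is used as the final fallback.
 */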
static vdev_t *
ztest_random_concrete_vdev_leaf(vdev_t *vd)
{
if (vd == NULL)
return (NULL);
if (vd->vdev_children == 0)
return (vd);
vdev_t *eligible[vd->vdev_children];
int eligible_idx = 0, i;
for (i = 0; i < vd->vdev_children; i++) {
vdev_t *cvd = vd->vdev_child[i];
if (cvd->vdev_top->vdev_removing)
continue;
if (cvd->vdev_children > 0 ||
(vdev_is_concrete(cvd) && !cvd->vdev_detached)) {
eligible[eligible_idx++] = cvd;
}
}
VERIFY3S(eligible_idx, >, 0);
uint64_t child_no = ztest_random(eligible_idx);
return (ztest_random_concrete_vdev_leaf(eligible[child_no]));
}
/* ARGSUSED */
void
ztest_initialize(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
int error = 0;
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/* Random leaf vdev */
vdev_t *rand_vd = ztest_random_concrete_vdev_leaf(spa->spa_root_vdev);
if (rand_vd == NULL) {
spa_config_exit(spa, SCL_VDEV, FTAG);
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* The random vdev we've selected may change as soon as we
* drop the spa_config_lock. We create local copies of things
* we're interested in.
*/
uint64_t guid = rand_vd->vdev_guid;
char *path = strdup(rand_vd->vdev_path);
boolean_t active = rand_vd->vdev_initialize_thread != NULL;
zfs_dbgmsg("vd %px, guid %llu", rand_vd, (u_longlong_t)guid);
spa_config_exit(spa, SCL_VDEV, FTAG);
uint64_t cmd = ztest_random(POOL_INITIALIZE_FUNCS);
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *vdev_errlist = fnvlist_alloc();
fnvlist_add_uint64(vdev_guids, path, guid);
error = spa_vdev_initialize(spa, vdev_guids, cmd, vdev_errlist);
fnvlist_free(vdev_guids);
fnvlist_free(vdev_errlist);
switch (cmd) {
case POOL_INITIALIZE_CANCEL:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Cancel initialize %s", path);
if (!active)
(void) printf(" failed (no initialize active)");
(void) printf("\n");
}
break;
case POOL_INITIALIZE_START:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Start initialize %s", path);
if (active && error == 0)
(void) printf(" failed (already active)");
else if (error != 0)
(void) printf(" failed (error %d)", error);
(void) printf("\n");
}
break;
case POOL_INITIALIZE_SUSPEND:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Suspend initialize %s", path);
if (!active)
(void) printf(" failed (no initialize active)");
(void) printf("\n");
}
break;
}
free(path);
mutex_exit(&ztest_vdev_lock);
}
/* ARGSUSED */
void
ztest_trim(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
int error = 0;
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/* Random leaf vdev */
vdev_t *rand_vd = ztest_random_concrete_vdev_leaf(spa->spa_root_vdev);
if (rand_vd == NULL) {
spa_config_exit(spa, SCL_VDEV, FTAG);
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* The random vdev we've selected may change as soon as we
* drop the spa_config_lock. We create local copies of things
* we're interested in.
*/
uint64_t guid = rand_vd->vdev_guid;
char *path = strdup(rand_vd->vdev_path);
boolean_t active = rand_vd->vdev_trim_thread != NULL;
zfs_dbgmsg("vd %p, guid %llu", rand_vd, (u_longlong_t)guid);
spa_config_exit(spa, SCL_VDEV, FTAG);
uint64_t cmd = ztest_random(POOL_TRIM_FUNCS);
uint64_t rate = 1 << ztest_random(30);
boolean_t partial = (ztest_random(5) > 0);
boolean_t secure = (ztest_random(5) > 0);
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *vdev_errlist = fnvlist_alloc();
fnvlist_add_uint64(vdev_guids, path, guid);
error = spa_vdev_trim(spa, vdev_guids, cmd, rate, partial,
secure, vdev_errlist);
fnvlist_free(vdev_guids);
fnvlist_free(vdev_errlist);
switch (cmd) {
case POOL_TRIM_CANCEL:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Cancel TRIM %s", path);
if (!active)
(void) printf(" failed (no TRIM active)");
(void) printf("\n");
}
break;
case POOL_TRIM_START:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Start TRIM %s", path);
if (active && error == 0)
(void) printf(" failed (already active)");
else if (error != 0)
(void) printf(" failed (error %d)", error);
(void) printf("\n");
}
break;
case POOL_TRIM_SUSPEND:
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Suspend TRIM %s", path);
if (!active)
(void) printf(" failed (no TRIM active)");
(void) printf("\n");
}
break;
}
free(path);
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify pool integrity by running zdb.
*/
static void
ztest_run_zdb(char *pool)
{
int status;
char *bin;
char *zdb;
char *zbuf;
const int len = MAXPATHLEN + MAXNAMELEN + 20;
FILE *fp;
bin = umem_alloc(len, UMEM_NOFAIL);
zdb = umem_alloc(len, UMEM_NOFAIL);
zbuf = umem_alloc(1024, UMEM_NOFAIL);
ztest_get_zdb_bin(bin, len);
char **set_gvars_args = ztest_global_vars_to_zdb_args();
char *set_gvars_args_joined = join_strings(set_gvars_args, " ");
free(set_gvars_args);
size_t would = snprintf(zdb, len,
"%s -bcc%s%s -G -d -Y -e -y %s -p %s %s",
bin,
ztest_opts.zo_verbose >= 3 ? "s" : "",
ztest_opts.zo_verbose >= 4 ? "v" : "",
set_gvars_args_joined,
ztest_opts.zo_dir,
pool);
ASSERT3U(would, <, len);
free(set_gvars_args_joined);
if (ztest_opts.zo_verbose >= 5)
(void) printf("Executing %s\n", strstr(zdb, "zdb "));
fp = popen(zdb, "r");
while (fgets(zbuf, 1024, fp) != NULL)
if (ztest_opts.zo_verbose >= 3)
(void) printf("%s", zbuf);
status = pclose(fp);
if (status == 0)
goto out;
ztest_dump_core = 0;
if (WIFEXITED(status))
- fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
+ fatal(B_FALSE, "'%s' exit code %d", zdb, WEXITSTATUS(status));
else
- fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
+ fatal(B_FALSE, "'%s' died with signal %d",
+ zdb, WTERMSIG(status));
out:
umem_free(bin, len);
umem_free(zdb, len);
umem_free(zbuf, 1024);
}
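For reference, with verbosity >= 4 and the hypothetical overrides from the earlier sketch, the snprintf() above assembles a command of the following shape (the zdb path, pool name, and directory are placeholders, not output from a real run):

/*
 * Sketch only:
 *   <zdb> -bccsv -G -d -Y -e -y -o zfs_recover=1 -o zfs_flags=512 \
 *       -p <zo_dir> <pool>
 *
 * With zo_verbose < 4 the "v" is dropped, and with zo_verbose < 3 the
 * "s" is dropped as well, leaving plain "-bcc".
 */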
static void
ztest_walk_pool_directory(char *header)
{
spa_t *spa = NULL;
if (ztest_opts.zo_verbose >= 6)
(void) printf("%s\n", header);
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
if (ztest_opts.zo_verbose >= 6)
(void) printf("\t%s\n", spa_name(spa));
mutex_exit(&spa_namespace_lock);
}
static void
ztest_spa_import_export(char *oldname, char *newname)
{
nvlist_t *config, *newconfig;
uint64_t pool_guid;
spa_t *spa;
int error;
if (ztest_opts.zo_verbose >= 4) {
(void) printf("import/export: old = %s, new = %s\n",
oldname, newname);
}
/*
* Clean up from previous runs.
*/
(void) spa_destroy(newname);
/*
* Get the pool's configuration and guid.
*/
VERIFY0(spa_open(oldname, &spa, FTAG));
/*
* Kick off a scrub to tickle scrub/export races.
*/
if (ztest_random(2) == 0)
(void) spa_scan(spa, POOL_SCAN_SCRUB);
pool_guid = spa_guid(spa);
spa_close(spa, FTAG);
ztest_walk_pool_directory("pools before export");
/*
* Export it.
*/
VERIFY0(spa_export(oldname, &config, B_FALSE, B_FALSE));
ztest_walk_pool_directory("pools after export");
/*
* Try to import it.
*/
newconfig = spa_tryimport(config);
ASSERT3P(newconfig, !=, NULL);
fnvlist_free(newconfig);
/*
* Import it under the new name.
*/
error = spa_import(newname, config, NULL, 0);
if (error != 0) {
dump_nvlist(config, 0);
fatal(B_FALSE, "couldn't import pool %s as %s: error %u",
oldname, newname, error);
}
ztest_walk_pool_directory("pools after import");
/*
* Try to import it again -- should fail with EEXIST.
*/
VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
/*
* Try to import it under a different name -- should fail with EEXIST.
*/
VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
/*
* Verify that the pool is no longer visible under the old name.
*/
VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
/*
* Verify that we can open and close the pool using the new name.
*/
VERIFY0(spa_open(newname, &spa, FTAG));
ASSERT3U(pool_guid, ==, spa_guid(spa));
spa_close(spa, FTAG);
fnvlist_free(config);
}
static void
ztest_resume(spa_t *spa)
{
if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
(void) printf("resuming from suspended state\n");
spa_vdev_state_enter(spa, SCL_NONE);
vdev_clear(spa, NULL);
(void) spa_vdev_state_exit(spa, NULL, 0);
(void) zio_resume(spa);
}
static void
ztest_resume_thread(void *arg)
{
spa_t *spa = arg;
while (!ztest_exiting) {
if (spa_suspended(spa))
ztest_resume(spa);
(void) poll(NULL, 0, 100);
/*
* Periodically change the zfs_compressed_arc_enabled setting.
*/
if (ztest_random(10) == 0)
zfs_compressed_arc_enabled = ztest_random(2);
/*
* Periodically change the zfs_abd_scatter_enabled setting.
*/
if (ztest_random(10) == 0)
zfs_abd_scatter_enabled = ztest_random(2);
}
thread_exit();
}
static void
ztest_deadman_thread(void *arg)
{
ztest_shared_t *zs = arg;
spa_t *spa = ztest_spa;
hrtime_t delay, overdue, last_run = gethrtime();
delay = (zs->zs_thread_stop - zs->zs_thread_start) +
MSEC2NSEC(zfs_deadman_synctime_ms);
while (!ztest_exiting) {
/*
* Wait for the delay timer while checking occasionally
* if we should stop.
*/
if (gethrtime() < last_run + delay) {
(void) poll(NULL, 0, 1000);
continue;
}
/*
* If the pool is suspended then fail immediately. Otherwise,
* check to see if the pool is making any progress. If
* vdev_deadman() discovers that there has been no recent
* I/O then it will end up aborting the tests.
*/
if (spa_suspended(spa) || spa->spa_root_vdev == NULL) {
- fatal(0, "aborting test after %llu seconds because "
+ fatal(B_FALSE,
+ "aborting test after %lu seconds because "
"pool has transitioned to a suspended state.",
zfs_deadman_synctime_ms / 1000);
}
vdev_deadman(spa->spa_root_vdev, FTAG);
/*
* If the process doesn't complete within a grace period of
* zfs_deadman_synctime_ms over the expected finish time,
* then it may be hung and is terminated.
*/
overdue = zs->zs_proc_stop + MSEC2NSEC(zfs_deadman_synctime_ms);
if (gethrtime() > overdue) {
- fatal(0, "aborting test after %llu seconds because "
+ fatal(B_FALSE,
+ "aborting test after %llu seconds because "
"the process is overdue for termination.",
(gethrtime() - zs->zs_proc_start) / NANOSEC);
}
(void) printf("ztest has been running for %lld seconds\n",
(gethrtime() - zs->zs_proc_start) / NANOSEC);
last_run = gethrtime();
delay = MSEC2NSEC(zfs_deadman_checktime_ms);
}
thread_exit();
}
static void
ztest_execute(int test, ztest_info_t *zi, uint64_t id)
{
ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
hrtime_t functime = gethrtime();
int i;
for (i = 0; i < zi->zi_iters; i++)
zi->zi_func(zd, id);
functime = gethrtime() - functime;
atomic_add_64(&zc->zc_count, 1);
atomic_add_64(&zc->zc_time, functime);
if (ztest_opts.zo_verbose >= 4)
(void) printf("%6.2f sec in %s\n",
(double)functime / NANOSEC, zi->zi_funcname);
}
static void
ztest_thread(void *arg)
{
int rand;
uint64_t id = (uintptr_t)arg;
ztest_shared_t *zs = ztest_shared;
uint64_t call_next;
hrtime_t now;
ztest_info_t *zi;
ztest_shared_callstate_t *zc;
while ((now = gethrtime()) < zs->zs_thread_stop) {
/*
* See if it's time to force a crash.
*/
if (now > zs->zs_thread_kill)
ztest_kill(zs);
/*
* If we're getting ENOSPC with some regularity, stop.
*/
if (zs->zs_enospc_count > 10)
break;
/*
* Pick a random function to execute.
*/
rand = ztest_random(ZTEST_FUNCS);
zi = &ztest_info[rand];
zc = ZTEST_GET_SHARED_CALLSTATE(rand);
call_next = zc->zc_next;
if (now >= call_next &&
atomic_cas_64(&zc->zc_next, call_next, call_next +
ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
ztest_execute(rand, zi, id);
}
}
thread_exit();
}
static void
ztest_dataset_name(char *dsname, char *pool, int d)
{
(void) snprintf(dsname, ZFS_MAX_DATASET_NAME_LEN, "%s/ds_%d", pool, d);
}
static void
ztest_dataset_destroy(int d)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
int t;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
if (ztest_opts.zo_verbose >= 3)
(void) printf("Destroying %s to free up space\n", name);
/*
* Clean up any non-standard clones and snapshots. In general,
* ztest thread t operates on dataset (t % zopt_datasets),
* so there may be more than one thing to clean up.
*/
for (t = d; t < ztest_opts.zo_threads;
t += ztest_opts.zo_datasets)
ztest_dsl_dataset_cleanup(name, t);
(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
}
static void
ztest_dataset_dirobj_verify(ztest_ds_t *zd)
{
uint64_t usedobjs, dirobjs, scratch;
/*
* ZTEST_DIROBJ is the object directory for the entire dataset.
* Therefore, the number of objects in use should equal the
* number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
* If not, we have an object leak.
*
* Note that we can only check this in ztest_dataset_open(),
* when the open-context and syncing-context values agree.
* That's because zap_count() returns the open-context value,
* while dmu_objset_space() returns the rootbp fill count.
*/
VERIFY0(zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
ASSERT3U(dirobjs + 1, ==, usedobjs);
}
static int
ztest_dataset_open(int d)
{
ztest_ds_t *zd = &ztest_ds[d];
uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
objset_t *os;
zilog_t *zilog;
char name[ZFS_MAX_DATASET_NAME_LEN];
int error;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
(void) pthread_rwlock_rdlock(&ztest_name_lock);
error = ztest_dataset_create(name);
if (error == ENOSPC) {
(void) pthread_rwlock_unlock(&ztest_name_lock);
ztest_record_enospc(FTAG);
return (error);
}
ASSERT(error == 0 || error == EEXIST);
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE,
B_TRUE, zd, &os));
(void) pthread_rwlock_unlock(&ztest_name_lock);
ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
zilog = zd->zd_zilog;
if (zilog->zl_header->zh_claim_lr_seq != 0 &&
zilog->zl_header->zh_claim_lr_seq < committed_seq)
- fatal(0, "missing log records: claimed %llu < committed %llu",
+ fatal(B_FALSE, "missing log records: "
+ "claimed %"PRIu64" < committed %"PRIu64"",
zilog->zl_header->zh_claim_lr_seq, committed_seq);
ztest_dataset_dirobj_verify(zd);
zil_replay(os, zd, ztest_replay_vector);
ztest_dataset_dirobj_verify(zd);
if (ztest_opts.zo_verbose >= 6)
- (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
+ (void) printf("%s replay %"PRIu64" blocks, "
+ "%"PRIu64" records, seq %"PRIu64"\n",
zd->zd_name,
- (u_longlong_t)zilog->zl_parse_blk_count,
- (u_longlong_t)zilog->zl_parse_lr_count,
- (u_longlong_t)zilog->zl_replaying_seq);
+ zilog->zl_parse_blk_count,
+ zilog->zl_parse_lr_count,
+ zilog->zl_replaying_seq);
zilog = zil_open(os, ztest_get_data);
if (zilog->zl_replaying_seq != 0 &&
zilog->zl_replaying_seq < committed_seq)
- fatal(0, "missing log records: replayed %llu < committed %llu",
+ fatal(B_FALSE, "missing log records: "
+ "replayed %"PRIu64" < committed %"PRIu64"",
zilog->zl_replaying_seq, committed_seq);
return (0);
}
static void
ztest_dataset_close(int d)
{
ztest_ds_t *zd = &ztest_ds[d];
zil_close(zd->zd_zilog);
dmu_objset_disown(zd->zd_os, B_TRUE, zd);
ztest_zd_fini(zd);
}
/* ARGSUSED */
static int
ztest_replay_zil_cb(const char *name, void *arg)
{
objset_t *os;
ztest_ds_t *zdtmp;
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_ANY, B_TRUE,
B_TRUE, FTAG, &os));
zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
ztest_zd_init(zdtmp, NULL, os);
zil_replay(os, zdtmp, ztest_replay_vector);
ztest_zd_fini(zdtmp);
if (dmu_objset_zil(os)->zl_parse_lr_count != 0 &&
ztest_opts.zo_verbose >= 6) {
zilog_t *zilog = dmu_objset_zil(os);
- (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
+ (void) printf("%s replay %"PRIu64" blocks, "
+ "%"PRIu64" records, seq %"PRIu64"\n",
name,
- (u_longlong_t)zilog->zl_parse_blk_count,
- (u_longlong_t)zilog->zl_parse_lr_count,
- (u_longlong_t)zilog->zl_replaying_seq);
+ zilog->zl_parse_blk_count,
+ zilog->zl_parse_lr_count,
+ zilog->zl_replaying_seq);
}
umem_free(zdtmp, sizeof (ztest_ds_t));
dmu_objset_disown(os, B_TRUE, FTAG);
return (0);
}
static void
ztest_freeze(void)
{
ztest_ds_t *zd = &ztest_ds[0];
spa_t *spa;
int numloops = 0;
if (ztest_opts.zo_verbose >= 3)
(void) printf("testing spa_freeze()...\n");
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
VERIFY0(ztest_dataset_open(0));
ztest_spa = spa;
/*
* Force the first log block to be transactionally allocated.
* We have to do this before we freeze the pool -- otherwise
* the log chain won't be anchored.
*/
while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
ztest_dmu_object_alloc_free(zd, 0);
zil_commit(zd->zd_zilog, 0);
}
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* Freeze the pool. This stops spa_sync() from doing anything,
* so that the only way to record changes from now on is the ZIL.
*/
spa_freeze(spa);
/*
* Because it is hard to predict how much space a write will actually
* require beforehand, we leave ourselves some fudge space to write over
* capacity.
*/
uint64_t capacity = metaslab_class_get_space(spa_normal_class(spa)) / 2;
/*
* Run tests that generate log records but don't alter the pool config
* or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
* We do a txg_wait_synced() after each iteration to force the txg
* to increase well beyond the last synced value in the uberblock.
* The ZIL should be OK with that.
*
* Run a random number of times less than zo_maxloops and ensure we do
* not run out of space on the pool.
*/
while (ztest_random(10) != 0 &&
numloops++ < ztest_opts.zo_maxloops &&
metaslab_class_get_alloc(spa_normal_class(spa)) < capacity) {
ztest_od_t od;
ztest_od_init(&od, 0, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
VERIFY0(ztest_object_init(zd, &od, sizeof (od), B_FALSE));
ztest_io(zd, od.od_object,
ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
txg_wait_synced(spa_get_dsl(spa), 0);
}
/*
* Commit all of the changes we just generated.
*/
zil_commit(zd->zd_zilog, 0);
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* Close our dataset and close the pool.
*/
ztest_dataset_close(0);
spa_close(spa, FTAG);
kernel_fini();
/*
* Open and close the pool and dataset to induce log replay.
*/
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
ASSERT3U(spa_freeze_txg(spa), ==, UINT64_MAX);
VERIFY0(ztest_dataset_open(0));
ztest_spa = spa;
txg_wait_synced(spa_get_dsl(spa), 0);
ztest_dataset_close(0);
ztest_reguid(NULL, 0);
spa_close(spa, FTAG);
kernel_fini();
}
static void
ztest_import_impl(ztest_shared_t *zs)
{
importargs_t args = { 0 };
nvlist_t *cfg = NULL;
int nsearch = 1;
char *searchdirs[nsearch];
int flags = ZFS_IMPORT_MISSING_LOG;
searchdirs[0] = ztest_opts.zo_dir;
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_FALSE;
VERIFY0(zpool_find_config(NULL, ztest_opts.zo_pool, &cfg, &args,
&libzpool_config_ops));
VERIFY0(spa_import(ztest_opts.zo_pool, cfg, NULL, flags));
fnvlist_free(cfg);
}
/*
* Import a storage pool with the given name.
*/
static void
ztest_import(ztest_shared_t *zs)
{
spa_t *spa;
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL));
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
ztest_import_impl(zs);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
spa_close(spa, FTAG);
kernel_fini();
if (!ztest_opts.zo_mmp_test) {
ztest_run_zdb(ztest_opts.zo_pool);
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
}
(void) pthread_rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
mutex_destroy(&ztest_checkpoint_lock);
}
/*
* Kick off threads to run tests on all datasets in parallel.
*/
static void
ztest_run(ztest_shared_t *zs)
{
spa_t *spa;
objset_t *os;
kthread_t *resume_thread, *deadman_thread;
kthread_t **run_threads;
uint64_t object;
int error;
int t, d;
ztest_exiting = B_FALSE;
/*
* Initialize parent/child shared state.
*/
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL));
zs->zs_thread_start = gethrtime();
zs->zs_thread_stop =
zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
zs->zs_thread_kill = zs->zs_thread_stop;
if (ztest_random(100) < ztest_opts.zo_killrate) {
zs->zs_thread_kill -=
ztest_random(ztest_opts.zo_passtime * NANOSEC);
}
mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
offsetof(ztest_cb_data_t, zcd_node));
/*
* Open our pool. It may need to be imported first depending on
* what tests were running when the previous pass was terminated.
*/
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
error = spa_open(ztest_opts.zo_pool, &spa, FTAG);
if (error) {
VERIFY3S(error, ==, ENOENT);
ztest_import_impl(zs);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
}
metaslab_preload_limit = ztest_random(20) + 1;
ztest_spa = spa;
VERIFY0(vdev_raidz_impl_set("cycle"));
dmu_objset_stats_t dds;
VERIFY0(ztest_dmu_objset_own(ztest_opts.zo_pool,
DMU_OST_ANY, B_TRUE, B_TRUE, FTAG, &os));
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
zs->zs_guid = dds.dds_guid;
dmu_objset_disown(os, B_TRUE, FTAG);
/*
* Create a thread to periodically resume suspended I/O.
*/
resume_thread = thread_create(NULL, 0, ztest_resume_thread,
spa, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri);
/*
* Create a deadman thread and set to panic if we hang.
*/
deadman_thread = thread_create(NULL, 0, ztest_deadman_thread,
zs, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri);
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
/*
* Verify that we can safely inquire about any object,
* whether it's allocated or not. To make it interesting,
* we probe a 5-wide window around each power of two.
* This hits all edge cases, including zero and the max.
*/
for (t = 0; t < 64; t++) {
for (d = -5; d <= 5; d++) {
error = dmu_object_info(spa->spa_meta_objset,
(1ULL << t) + d, NULL);
ASSERT(error == 0 || error == ENOENT ||
error == EINVAL);
}
}
/*
* If we got any ENOSPC errors on the previous run, destroy something.
*/
if (zs->zs_enospc_count != 0) {
int d = ztest_random(ztest_opts.zo_datasets);
ztest_dataset_destroy(d);
}
zs->zs_enospc_count = 0;
/*
* If we were in the middle of ztest_device_removal() and were killed
* we need to ensure the removal and scrub complete before running
* any tests that check ztest_device_removal_active. The removal will
* be restarted automatically when the spa is opened, but we need to
* initiate the scrub manually if it is not already in progress. Note
* that we always run the scrub whenever an indirect vdev exists
* because we have no way of knowing for sure if ztest_device_removal()
* fully completed its scrub before the pool was reimported.
*/
if (spa->spa_removing_phys.sr_state == DSS_SCANNING ||
spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
while (spa->spa_removing_phys.sr_state == DSS_SCANNING)
txg_wait_synced(spa_get_dsl(spa), 0);
error = ztest_scrub_impl(spa);
if (error == EBUSY)
error = 0;
ASSERT0(error);
}
run_threads = umem_zalloc(ztest_opts.zo_threads * sizeof (kthread_t *),
UMEM_NOFAIL);
if (ztest_opts.zo_verbose >= 4)
(void) printf("starting main threads...\n");
/*
* Replay all logs of all datasets in the pool. This is primarily for
* temporary datasets, which wouldn't otherwise get replayed and
* can trigger failures when attempting to offline a SLOG in
* ztest_fault_inject().
*/
(void) dmu_objset_find(ztest_opts.zo_pool, ztest_replay_zil_cb,
NULL, DS_FIND_CHILDREN);
/*
* Kick off all the tests that run in parallel.
*/
for (t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets && ztest_dataset_open(t) != 0) {
umem_free(run_threads, ztest_opts.zo_threads *
sizeof (kthread_t *));
return;
}
run_threads[t] = thread_create(NULL, 0, ztest_thread,
(void *)(uintptr_t)t, 0, NULL, TS_RUN | TS_JOINABLE,
defclsyspri);
}
/*
* Wait for all of the tests to complete.
*/
for (t = 0; t < ztest_opts.zo_threads; t++)
VERIFY0(thread_join(run_threads[t]));
/*
* Close all datasets. This must be done after all the threads
* are joined so we can be sure none of the datasets are in-use
* by any of the threads.
*/
for (t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets)
ztest_dataset_close(t);
}
txg_wait_synced(spa_get_dsl(spa), 0);
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
umem_free(run_threads, ztest_opts.zo_threads * sizeof (kthread_t *));
/* Kill the resume and deadman threads */
ztest_exiting = B_TRUE;
VERIFY0(thread_join(resume_thread));
VERIFY0(thread_join(deadman_thread));
ztest_resume(spa);
/*
* Right before closing the pool, kick off a bunch of async I/O;
* spa_close() should wait for it to complete.
*/
for (object = 1; object < 50; object++) {
dmu_prefetch(spa->spa_meta_objset, object, 0, 0, 1ULL << 20,
ZIO_PRIORITY_SYNC_READ);
}
/* Verify that at least one commit cb was called in a timely fashion */
if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG)
VERIFY0(zc_min_txg_delay);
spa_close(spa, FTAG);
/*
* Verify that we can loop over all pools.
*/
mutex_enter(&spa_namespace_lock);
for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
if (ztest_opts.zo_verbose > 3)
(void) printf("spa_next: found %s\n", spa_name(spa));
mutex_exit(&spa_namespace_lock);
/*
* Verify that we can export the pool and reimport it under a
* different name.
*/
if ((ztest_random(2) == 0) && !ztest_opts.zo_mmp_test) {
char name[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(name, sizeof (name), "%s_import",
ztest_opts.zo_pool);
ztest_spa_import_export(ztest_opts.zo_pool, name);
ztest_spa_import_export(name, ztest_opts.zo_pool);
}
kernel_fini();
list_destroy(&zcl.zcl_callbacks);
mutex_destroy(&zcl.zcl_callbacks_lock);
(void) pthread_rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
mutex_destroy(&ztest_checkpoint_lock);
}
static void
print_time(hrtime_t t, char *timebuf)
{
hrtime_t s = t / NANOSEC;
hrtime_t m = s / 60;
hrtime_t h = m / 60;
hrtime_t d = h / 24;
s -= m * 60;
m -= h * 60;
h -= d * 24;
timebuf[0] = '\0';
if (d)
(void) sprintf(timebuf,
"%llud%02lluh%02llum%02llus", d, h, m, s);
else if (h)
(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
else if (m)
(void) sprintf(timebuf, "%llum%02llus", m, s);
else
(void) sprintf(timebuf, "%llus", s);
}
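A quick worked example of the decomposition above (values chosen for illustration, not taken from a run):

/* Sketch only: exercises two of the branches above. */
char timebuf[100];

print_time(90061LL * NANOSEC, timebuf);	/* 1 day, 1 h, 1 min, 1 s */
/* timebuf now holds "1d01h01m01s" -- first branch (d != 0) */

print_time(125LL * NANOSEC, timebuf);	/* 2 min, 5 s */
/* timebuf now holds "2m05s" -- third branch (m != 0) */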
static nvlist_t *
make_random_props(void)
{
nvlist_t *props;
props = fnvlist_alloc();
if (ztest_random(2) == 0)
return (props);
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 1);
return (props);
}
/*
* Create a storage pool with the given name and initial vdev size.
* Then test spa_freeze() functionality.
*/
static void
ztest_init(ztest_shared_t *zs)
{
spa_t *spa;
nvlist_t *nvroot, *props;
int i;
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ztest_checkpoint_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY0(pthread_rwlock_init(&ztest_name_lock, NULL));
kernel_init(SPA_MODE_READ | SPA_MODE_WRITE);
/*
* Create the storage pool.
*/
(void) spa_destroy(ztest_opts.zo_pool);
ztest_shared->zs_vdev_next_leaf = 0;
zs->zs_splits = 0;
zs->zs_mirrors = ztest_opts.zo_mirrors;
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
NULL, ztest_opts.zo_raid_children, zs->zs_mirrors, 1);
props = make_random_props();
/*
* We don't expect the pool to suspend unless maxfaults == 0,
* in which case ztest_fault_inject() temporarily takes away
* the only valid replica.
*/
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
MAXFAULTS(zs) ? ZIO_FAILURE_MODE_PANIC : ZIO_FAILURE_MODE_WAIT);
for (i = 0; i < SPA_FEATURES; i++) {
char *buf;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
/*
* 75% chance of using the log space map feature. We want ztest
* to exercise both the code paths that use the log space map
* feature and the ones that don't.
*/
if (i == SPA_FEATURE_LOG_SPACEMAP && ztest_random(4) == 0)
continue;
VERIFY3S(-1, !=, asprintf(&buf, "feature@%s",
spa_feature_table[i].fi_uname));
fnvlist_add_uint64(props, buf, 0);
free(buf);
}
VERIFY0(spa_create(ztest_opts.zo_pool, nvroot, props, NULL, NULL));
fnvlist_free(nvroot);
fnvlist_free(props);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
spa_close(spa, FTAG);
kernel_fini();
if (!ztest_opts.zo_mmp_test) {
ztest_run_zdb(ztest_opts.zo_pool);
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
}
(void) pthread_rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
mutex_destroy(&ztest_checkpoint_lock);
}
static void
setup_data_fd(void)
{
static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
ztest_fd_data = mkstemp(ztest_name_data);
ASSERT3S(ztest_fd_data, >=, 0);
(void) unlink(ztest_name_data);
}
static int
shared_data_size(ztest_shared_hdr_t *hdr)
{
int size;
size = hdr->zh_hdr_size;
size += hdr->zh_opts_size;
size += hdr->zh_size;
size += hdr->zh_stats_size * hdr->zh_stats_count;
size += hdr->zh_ds_size * hdr->zh_ds_count;
return (size);
}
static void
setup_hdr(void)
{
int size;
ztest_shared_hdr_t *hdr;
hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
ASSERT3P(hdr, !=, MAP_FAILED);
VERIFY0(ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
hdr->zh_size = sizeof (ztest_shared_t);
hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
hdr->zh_stats_count = ZTEST_FUNCS;
hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
hdr->zh_ds_count = ztest_opts.zo_datasets;
size = shared_data_size(hdr);
VERIFY0(ftruncate(ztest_fd_data, size));
(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
}
static void
setup_data(void)
{
int size, offset;
ztest_shared_hdr_t *hdr;
uint8_t *buf;
hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
PROT_READ, MAP_SHARED, ztest_fd_data, 0);
ASSERT3P(hdr, !=, MAP_FAILED);
size = shared_data_size(hdr);
(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
ASSERT3P(hdr, !=, MAP_FAILED);
buf = (uint8_t *)hdr;
offset = hdr->zh_hdr_size;
ztest_shared_opts = (void *)&buf[offset];
offset += hdr->zh_opts_size;
ztest_shared = (void *)&buf[offset];
offset += hdr->zh_size;
ztest_shared_callstate = (void *)&buf[offset];
offset += hdr->zh_stats_size * hdr->zh_stats_count;
ztest_shared_ds = (void *)&buf[offset];
}
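Taken together, setup_hdr() and setup_data() define a simple flat layout for the shared file; the sketch below just restates the sizes and offsets computed above and adds nothing new:

/*
 * Layout of the mmap'd ZTEST_FD_DATA file (sizes recorded in the header
 * by setup_hdr(), total size given by shared_data_size()):
 *
 *   ztest_shared_hdr_t                          zh_hdr_size bytes
 *   ztest_shared_opts_t                         zh_opts_size bytes
 *   ztest_shared_t                              zh_size bytes
 *   ztest_shared_callstate_t[zh_stats_count]    zh_stats_size bytes each
 *   ztest_shared_ds_t[zh_ds_count]              zh_ds_size bytes each
 */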
static boolean_t
exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
{
pid_t pid;
int status;
char *cmdbuf = NULL;
pid = fork();
if (cmd == NULL) {
cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
(void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
cmd = cmdbuf;
}
if (pid == -1)
- fatal(1, "fork failed");
+ fatal(B_TRUE, "fork failed");
if (pid == 0) { /* child */
char *emptyargv[2] = { cmd, NULL };
char fd_data_str[12];
struct rlimit rl = { 1024, 1024 };
(void) setrlimit(RLIMIT_NOFILE, &rl);
(void) close(ztest_fd_rand);
VERIFY3S(11, >=,
snprintf(fd_data_str, 12, "%d", ztest_fd_data));
VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));
(void) enable_extended_FILE_stdio(-1, -1);
if (libpath != NULL)
VERIFY0(setenv("LD_LIBRARY_PATH", libpath, 1));
(void) execv(cmd, emptyargv);
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "exec failed: %s", cmd);
}
if (cmdbuf != NULL) {
umem_free(cmdbuf, MAXPATHLEN);
cmd = NULL;
}
while (waitpid(pid, &status, 0) != pid)
continue;
if (statusp != NULL)
*statusp = status;
if (WIFEXITED(status)) {
if (WEXITSTATUS(status) != 0) {
(void) fprintf(stderr, "child exited with code %d\n",
WEXITSTATUS(status));
exit(2);
}
return (B_FALSE);
} else if (WIFSIGNALED(status)) {
if (!ignorekill || WTERMSIG(status) != SIGKILL) {
(void) fprintf(stderr, "child died with signal %d\n",
WTERMSIG(status));
exit(3);
}
return (B_TRUE);
} else {
(void) fprintf(stderr, "something strange happened to child\n");
exit(4);
- /* NOTREACHED */
}
}
static void
ztest_run_init(void)
{
int i;
ztest_shared_t *zs = ztest_shared;
/*
* Blow away any existing copy of zpool.cache
*/
(void) remove(spa_config_path);
if (ztest_opts.zo_init == 0) {
if (ztest_opts.zo_verbose >= 1)
(void) printf("Importing pool %s\n",
ztest_opts.zo_pool);
ztest_import(zs);
return;
}
/*
* Create and initialize our storage pool.
*/
for (i = 1; i <= ztest_opts.zo_init; i++) {
bzero(zs, sizeof (ztest_shared_t));
if (ztest_opts.zo_verbose >= 3 &&
ztest_opts.zo_init != 1) {
(void) printf("ztest_init(), pass %d\n", i);
}
ztest_init(zs);
}
}
int
main(int argc, char **argv)
{
int kills = 0;
int iters = 0;
int older = 0;
int newer = 0;
ztest_shared_t *zs;
ztest_info_t *zi;
ztest_shared_callstate_t *zc;
char timebuf[100];
char numbuf[NN_NUMBUF_SZ];
char *cmd;
boolean_t hasalt;
int f, err;
char *fd_data_str = getenv("ZTEST_FD_DATA");
struct sigaction action;
(void) setvbuf(stdout, NULL, _IOLBF, 0);
dprintf_setup(&argc, argv);
zfs_deadman_synctime_ms = 300000;
zfs_deadman_checktime_ms = 30000;
/*
* As two-word space map entries may not come up often (especially
* if pool and vdev sizes are small) we want to force at least some
* of them so the feature gets tested.
*/
zfs_force_some_double_word_sm_entries = B_TRUE;
/*
* Verify that even extensively damaged split blocks with many
* segments can be reconstructed in a reasonable amount of time
* when reconstruction is known to be possible.
*
* Note: the lower this value is, the more damage we inflict, and
* the more time ztest spends in recovering that damage. We chose
* to induce damage 1/100th of the time so recovery is tested but
* not so frequently that ztest doesn't get to test other code paths.
*/
zfs_reconstruct_indirect_damage_fraction = 100;
action.sa_handler = sig_handler;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
if (sigaction(SIGSEGV, &action, NULL) < 0) {
(void) fprintf(stderr, "ztest: cannot catch SIGSEGV: %s.\n",
strerror(errno));
exit(EXIT_FAILURE);
}
if (sigaction(SIGABRT, &action, NULL) < 0) {
(void) fprintf(stderr, "ztest: cannot catch SIGABRT: %s.\n",
strerror(errno));
exit(EXIT_FAILURE);
}
/*
* Force random_get_bytes() to use /dev/urandom in order to prevent
* ztest from needlessly depleting the system entropy pool.
*/
random_path = "/dev/urandom";
ztest_fd_rand = open(random_path, O_RDONLY);
ASSERT3S(ztest_fd_rand, >=, 0);
if (!fd_data_str) {
process_options(argc, argv);
setup_data_fd();
setup_hdr();
setup_data();
bcopy(&ztest_opts, ztest_shared_opts,
sizeof (*ztest_shared_opts));
} else {
ztest_fd_data = atoi(fd_data_str);
setup_data();
bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
}
ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
err = ztest_set_global_vars();
if (err != 0 && !fd_data_str) {
/* error message done by ztest_set_global_vars */
exit(EXIT_FAILURE);
} else {
/* children should not be spawned if setting gvars fails */
VERIFY3S(err, ==, 0);
}
/* Override location of zpool.cache */
VERIFY3S(asprintf((char **)&spa_config_path, "%s/zpool.cache",
ztest_opts.zo_dir), !=, -1);
ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
UMEM_NOFAIL);
zs = ztest_shared;
if (fd_data_str) {
metaslab_force_ganging = ztest_opts.zo_metaslab_force_ganging;
metaslab_df_alloc_threshold =
zs->zs_metaslab_df_alloc_threshold;
if (zs->zs_do_init)
ztest_run_init();
else
ztest_run(zs);
exit(0);
}
hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
if (ztest_opts.zo_verbose >= 1) {
- (void) printf("%llu vdevs, %d datasets, %d threads,"
- "%d %s disks, %llu seconds...\n\n",
- (u_longlong_t)ztest_opts.zo_vdevs,
+ (void) printf("%"PRIu64" vdevs, %d datasets, %d threads,"
+ "%d %s disks, %"PRIu64" seconds...\n\n",
+ ztest_opts.zo_vdevs,
ztest_opts.zo_datasets,
ztest_opts.zo_threads,
ztest_opts.zo_raid_children,
ztest_opts.zo_raid_type,
- (u_longlong_t)ztest_opts.zo_time);
+ ztest_opts.zo_time);
}
cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
(void) strlcpy(cmd, getexecname(), MAXNAMELEN);
zs->zs_do_init = B_TRUE;
if (strlen(ztest_opts.zo_alt_ztest) != 0) {
if (ztest_opts.zo_verbose >= 1) {
(void) printf("Executing older ztest for "
"initialization: %s\n", ztest_opts.zo_alt_ztest);
}
VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
ztest_opts.zo_alt_libpath, B_FALSE, NULL));
} else {
VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
}
zs->zs_do_init = B_FALSE;
zs->zs_proc_start = gethrtime();
zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
for (f = 0; f < ZTEST_FUNCS; f++) {
zi = &ztest_info[f];
zc = ZTEST_GET_SHARED_CALLSTATE(f);
if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
zc->zc_next = UINT64_MAX;
else
zc->zc_next = zs->zs_proc_start +
ztest_random(2 * zi->zi_interval[0] + 1);
}
/*
* Run the tests in a loop. These tests include fault injection
* to verify that self-healing data works, and forced crashes
* to verify that we never lose on-disk consistency.
*/
while (gethrtime() < zs->zs_proc_stop) {
int status;
boolean_t killed;
/*
* Initialize the workload counters for each function.
*/
for (f = 0; f < ZTEST_FUNCS; f++) {
zc = ZTEST_GET_SHARED_CALLSTATE(f);
zc->zc_count = 0;
zc->zc_time = 0;
}
/* Set the allocation switch size */
zs->zs_metaslab_df_alloc_threshold =
ztest_random(zs->zs_metaslab_sz / 4) + 1;
if (!hasalt || ztest_random(2) == 0) {
if (hasalt && ztest_opts.zo_verbose >= 1) {
(void) printf("Executing newer ztest: %s\n",
cmd);
}
newer++;
killed = exec_child(cmd, NULL, B_TRUE, &status);
} else {
if (hasalt && ztest_opts.zo_verbose >= 1) {
(void) printf("Executing older ztest: %s\n",
ztest_opts.zo_alt_ztest);
}
older++;
killed = exec_child(ztest_opts.zo_alt_ztest,
ztest_opts.zo_alt_libpath, B_TRUE, &status);
}
if (killed)
kills++;
iters++;
if (ztest_opts.zo_verbose >= 1) {
hrtime_t now = gethrtime();
now = MIN(now, zs->zs_proc_stop);
print_time(zs->zs_proc_stop - now, timebuf);
nicenum(zs->zs_space, numbuf, sizeof (numbuf));
- (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
+ (void) printf("Pass %3d, %8s, %3"PRIu64" ENOSPC, "
"%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
iters,
WIFEXITED(status) ? "Complete" : "SIGKILL",
- (u_longlong_t)zs->zs_enospc_count,
+ zs->zs_enospc_count,
100.0 * zs->zs_alloc / zs->zs_space,
numbuf,
100.0 * (now - zs->zs_proc_start) /
(ztest_opts.zo_time * NANOSEC), timebuf);
}
if (ztest_opts.zo_verbose >= 2) {
(void) printf("\nWorkload summary:\n\n");
(void) printf("%7s %9s %s\n",
"Calls", "Time", "Function");
(void) printf("%7s %9s %s\n",
"-----", "----", "--------");
for (f = 0; f < ZTEST_FUNCS; f++) {
zi = &ztest_info[f];
zc = ZTEST_GET_SHARED_CALLSTATE(f);
print_time(zc->zc_time, timebuf);
- (void) printf("%7llu %9s %s\n",
- (u_longlong_t)zc->zc_count, timebuf,
+ (void) printf("%7"PRIu64" %9s %s\n",
+ zc->zc_count, timebuf,
zi->zi_funcname);
}
(void) printf("\n");
}
if (!ztest_opts.zo_mmp_test)
ztest_run_zdb(ztest_opts.zo_pool);
}
if (ztest_opts.zo_verbose >= 1) {
if (hasalt) {
(void) printf("%d runs of older ztest: %s\n", older,
ztest_opts.zo_alt_ztest);
(void) printf("%d runs of newer ztest: %s\n", newer,
cmd);
}
(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
kills, iters - kills, (100.0 * kills) / MAX(1, iters));
}
umem_free(cmd, MAXNAMELEN);
return (0);
}
diff --git a/sys/contrib/openzfs/config/Abigail.am b/sys/contrib/openzfs/config/Abigail.am
index 0a74741b4e4f..49673a309e3b 100644
--- a/sys/contrib/openzfs/config/Abigail.am
+++ b/sys/contrib/openzfs/config/Abigail.am
@@ -1,29 +1,31 @@
#
# When performing an ABI check the following options are applied:
#
# --no-unreferenced-symbols: Exclude symbols which are not referenced by
# any debug information. Without this _init() and _fini() are incorrectly
# reported on CentOS7 for libuutil.so.
#
# --headers-dir1: Limit ABI checks to public OpenZFS headers, otherwise
# changes in public system headers are also reported.
#
# --suppressions: Honor a suppressions file for each library to provide
# a mechanism for suppressing harmless warnings.
#
PHONY += checkabi storeabi
checkabi:
for lib in $(lib_LTLIBRARIES) ; do \
abidiff --no-unreferenced-symbols \
--headers-dir1 ../../include \
--suppressions $${lib%.la}.suppr \
$${lib%.la}.abi .libs/$${lib%.la}.so ; \
done
storeabi:
cd .libs ; \
for lib in $(lib_LTLIBRARIES) ; do \
- abidw --no-show-locs $${lib%.la}.so > ../$${lib%.la}.abi ; \
+ abidw --no-show-locs \
+ --no-corpus-path \
+ $${lib%.la}.so > ../$${lib%.la}.abi ; \
done
diff --git a/sys/contrib/openzfs/config/kernel-make-request-fn.m4 b/sys/contrib/openzfs/config/kernel-make-request-fn.m4
index 290ef6b8da7d..86b202a7a272 100644
--- a/sys/contrib/openzfs/config/kernel-make-request-fn.m4
+++ b/sys/contrib/openzfs/config/kernel-make-request-fn.m4
@@ -1,140 +1,160 @@
dnl #
dnl # Check for make_request_fn interface.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN], [
ZFS_LINUX_TEST_SRC([make_request_fn_void], [
#include <linux/blkdev.h>
void make_request(struct request_queue *q,
struct bio *bio) { return; }
],[
blk_queue_make_request(NULL, &make_request);
])
ZFS_LINUX_TEST_SRC([make_request_fn_blk_qc_t], [
#include <linux/blkdev.h>
blk_qc_t make_request(struct request_queue *q,
struct bio *bio) { return (BLK_QC_T_NONE); }
],[
blk_queue_make_request(NULL, &make_request);
])
ZFS_LINUX_TEST_SRC([blk_alloc_queue_request_fn], [
#include <linux/blkdev.h>
blk_qc_t make_request(struct request_queue *q,
struct bio *bio) { return (BLK_QC_T_NONE); }
],[
struct request_queue *q __attribute__ ((unused));
q = blk_alloc_queue(make_request, NUMA_NO_NODE);
])
ZFS_LINUX_TEST_SRC([blk_alloc_queue_request_fn_rh], [
#include <linux/blkdev.h>
blk_qc_t make_request(struct request_queue *q,
struct bio *bio) { return (BLK_QC_T_NONE); }
],[
struct request_queue *q __attribute__ ((unused));
q = blk_alloc_queue_rh(make_request, NUMA_NO_NODE);
])
ZFS_LINUX_TEST_SRC([block_device_operations_submit_bio], [
#include <linux/blkdev.h>
],[
struct block_device_operations o;
o.submit_bio = NULL;
])
+
+ ZFS_LINUX_TEST_SRC([blk_alloc_disk], [
+ #include <linux/blkdev.h>
+ ],[
+ struct gendisk *disk __attribute__ ((unused));
+ disk = blk_alloc_disk(NUMA_NO_NODE);
+ ])
])
AC_DEFUN([ZFS_AC_KERNEL_MAKE_REQUEST_FN], [
dnl # Checked as part of the blk_alloc_queue_request_fn test
dnl #
dnl # Linux 5.9 API Change
dnl # make_request_fn was moved into block_device_operations->submit_bio
dnl #
AC_MSG_CHECKING([whether submit_bio is member of struct block_device_operations])
ZFS_LINUX_TEST_RESULT([block_device_operations_submit_bio], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS, 1,
[submit_bio is member of struct block_device_operations])
+
+ dnl #
+ dnl # Linux 5.14 API Change:
+ dnl # blk_alloc_queue() + alloc_disk() combo replaced by
+ dnl # a single call to blk_alloc_disk().
+ dnl #
+ AC_MSG_CHECKING([whether blk_alloc_disk() exists])
+ ZFS_LINUX_TEST_RESULT([blk_alloc_disk], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE([HAVE_BLK_ALLOC_DISK], 1, [blk_alloc_disk() exists])
+ ], [
+ AC_MSG_RESULT(no)
+ ])
],[
AC_MSG_RESULT(no)
dnl # Checked as part of the blk_alloc_queue_request_fn test
dnl #
dnl # Linux 5.7 API Change
dnl # blk_alloc_queue() expects request function.
dnl #
AC_MSG_CHECKING([whether blk_alloc_queue() expects request function])
ZFS_LINUX_TEST_RESULT([blk_alloc_queue_request_fn], [
AC_MSG_RESULT(yes)
dnl # This is currently always the case.
AC_MSG_CHECKING([whether make_request_fn() returns blk_qc_t])
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN, 1,
[blk_alloc_queue() expects request function])
AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
[Noting that make_request_fn() returns blk_qc_t])
],[
dnl #
dnl # CentOS Stream 4.18.0-257 API Change
dnl # The Linux 5.7 blk_alloc_queue() change was back-
dnl # ported and the symbol renamed blk_alloc_queue_rh().
dnl # As of this kernel version they're not providing
dnl # any compatibility code in the kernel for this.
dnl #
ZFS_LINUX_TEST_RESULT([blk_alloc_queue_request_fn_rh], [
AC_MSG_RESULT(yes)
dnl # This is currently always the case.
AC_MSG_CHECKING([whether make_request_fn_rh() returns blk_qc_t])
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH, 1,
[blk_alloc_queue_rh() expects request function])
AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
[Noting that make_request_fn() returns blk_qc_t])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Linux 3.2 API Change
dnl # make_request_fn returns void.
dnl #
AC_MSG_CHECKING(
[whether make_request_fn() returns void])
ZFS_LINUX_TEST_RESULT([make_request_fn_void], [
AC_MSG_RESULT(yes)
AC_DEFINE(MAKE_REQUEST_FN_RET, void,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_VOID, 1,
[Noting that make_request_fn() returns void])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Linux 4.4 API Change
dnl # make_request_fn returns blk_qc_t.
dnl #
AC_MSG_CHECKING(
[whether make_request_fn() returns blk_qc_t])
ZFS_LINUX_TEST_RESULT([make_request_fn_blk_qc_t], [
AC_MSG_RESULT(yes)
AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
[Noting that make_request_fn() ]
[returns blk_qc_t])
],[
ZFS_LINUX_TEST_ERROR([make_request_fn])
])
])
])
])
])
])
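The defines produced by the checks above are meant to be consumed roughly as in the fragment below. This is an illustration only, not the actual OpenZFS block-device code; it uses just the allocation calls compiled by the configure tests, make_request stands in for whatever request/submit function the driver provides, and the function name and error handling are placeholders.

#include <linux/blkdev.h>

/*
 * Sketch only: choose the queue allocation path from the configure
 * results above.
 */
static int
example_setup_queue(void)
{
#if defined(HAVE_BLK_ALLOC_DISK)
	/* Linux 5.14+: one call allocates the gendisk and its queue. */
	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);
	return (disk == NULL ? -ENOMEM : 0);
#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
	/* Linux 5.7-5.13: blk_alloc_queue() takes the request function. */
	struct request_queue *q = blk_alloc_queue(make_request, NUMA_NO_NODE);
	return (q == NULL ? -ENOMEM : 0);
#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
	/* CentOS Stream backport of the Linux 5.7 interface. */
	struct request_queue *q = blk_alloc_queue_rh(make_request, NUMA_NO_NODE);
	return (q == NULL ? -ENOMEM : 0);
#else
	/* Older kernels (pre-5.7 interface) are not covered by this sketch. */
	return (-EOPNOTSUPP);
#endif
}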
diff --git a/sys/contrib/openzfs/config/kernel-vfs-set_page_dirty.m4 b/sys/contrib/openzfs/config/kernel-vfs-set_page_dirty.m4
new file mode 100644
index 000000000000..a9d252e4e01e
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-vfs-set_page_dirty.m4
@@ -0,0 +1,34 @@
+dnl #
+dnl # Linux 5.14 adds a change to require set_page_dirty to be manually
+dnl # wired up in struct address_space_operations. Determine if this needs
+dnl # to be done. This patch set also introduced the __set_page_dirty_nobuffers
+dnl # declaration in linux/pagemap.h, so these tests look for the presence
+dnl # of that function to tell the compiler to assign set_page_dirty in
+dnl # module/os/linux/zfs/zpl_file.c
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_SET_PAGE_DIRTY_NOBUFFERS], [
+ ZFS_LINUX_TEST_SRC([vfs_has_set_page_dirty_nobuffers], [
+ #include <linux/pagemap.h>
+ #include <linux/fs.h>
+
+ static const struct address_space_operations
+ aops __attribute__ ((unused)) = {
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ };
+ ],[])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_VFS_SET_PAGE_DIRTY_NOBUFFERS], [
+ dnl #
+ dnl # Linux 5.14 change requires set_page_dirty() to be assigned
+ dnl # in address_space_operations()
+ dnl #
+ AC_MSG_CHECKING([__set_page_dirty_nobuffers exists])
+ ZFS_LINUX_TEST_RESULT([vfs_has_set_page_dirty_nobuffers], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS, 1,
+ [__set_page_dirty_nobuffers exists])
+ ],[
+ AC_MSG_RESULT([no])
+ ])
+])
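The HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS define emitted above gates the assignment that the header comment mentions for module/os/linux/zfs/zpl_file.c. The fragment below is a minimal sketch of that wiring, mirroring the compile-test source rather than the real zpl_file.c (the structure name and the omitted members are placeholders):

#include <linux/pagemap.h>
#include <linux/fs.h>

/* Sketch only: wire up set_page_dirty explicitly on Linux 5.14+. */
static const struct address_space_operations example_aops = {
#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
	.set_page_dirty	= __set_page_dirty_nobuffers,
#endif
	/* .readpage, .writepage, etc. omitted in this sketch */
};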
diff --git a/sys/contrib/openzfs/config/kernel.m4 b/sys/contrib/openzfs/config/kernel.m4
index 7196e66ca28a..5ea2286dbcc3 100644
--- a/sys/contrib/openzfs/config/kernel.m4
+++ b/sys/contrib/openzfs/config/kernel.m4
@@ -1,887 +1,889 @@
dnl #
dnl # Default ZFS kernel configuration
dnl #
AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
AM_COND_IF([BUILD_LINUX], [
dnl # Setup the kernel build environment.
ZFS_AC_KERNEL
ZFS_AC_QAT
dnl # Sanity checks for module building and CONFIG_* defines
ZFS_AC_KERNEL_TEST_MODULE
ZFS_AC_KERNEL_CONFIG_DEFINED
dnl # Sequential ZFS_LINUX_TRY_COMPILE tests
ZFS_AC_KERNEL_FPU_HEADER
ZFS_AC_KERNEL_OBJTOOL_HEADER
ZFS_AC_KERNEL_WAIT_QUEUE_ENTRY_T
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
dnl # Parallel ZFS_LINUX_TEST_SRC / ZFS_LINUX_TEST_RESULT tests
ZFS_AC_KERNEL_TEST_SRC
ZFS_AC_KERNEL_TEST_RESULT
AS_IF([test "$LINUX_OBJ" != "$LINUX"], [
KERNEL_MAKE="$KERNEL_MAKE O=$LINUX_OBJ"
])
AC_SUBST(KERNEL_MAKE)
])
])
dnl #
dnl # Generate and compile all of the kernel API test cases to determine
dnl # which interfaces are available. By invoking the kernel build system
dnl # only once, the compilation can be done in parallel, significantly
dnl # speeding up the process.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_OBJTOOL
ZFS_AC_KERNEL_SRC_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_SRC_ACCESS_OK_TYPE
ZFS_AC_KERNEL_SRC_PDE_DATA
ZFS_AC_KERNEL_SRC_FALLOCATE
ZFS_AC_KERNEL_SRC_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_SRC_RWSEM
ZFS_AC_KERNEL_SRC_SCHED
ZFS_AC_KERNEL_SRC_USLEEP_RANGE
ZFS_AC_KERNEL_SRC_KMEM_CACHE
ZFS_AC_KERNEL_SRC_KVMALLOC
ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_SRC_WAIT
ZFS_AC_KERNEL_SRC_INODE_TIMES
ZFS_AC_KERNEL_SRC_INODE_LOCK
ZFS_AC_KERNEL_SRC_GROUP_INFO_GID
ZFS_AC_KERNEL_SRC_RW
ZFS_AC_KERNEL_SRC_TIMER_SETUP
ZFS_AC_KERNEL_SRC_SUPER_USER_NS
ZFS_AC_KERNEL_SRC_PROC_OPERATIONS
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_SRC_BIO
ZFS_AC_KERNEL_SRC_BLKDEV
ZFS_AC_KERNEL_SRC_BLK_QUEUE
ZFS_AC_KERNEL_SRC_REVALIDATE_DISK
ZFS_AC_KERNEL_SRC_GET_DISK_RO
ZFS_AC_KERNEL_SRC_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_SRC_DISCARD_GRANULARITY
ZFS_AC_KERNEL_SRC_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_SRC_XATTR
ZFS_AC_KERNEL_SRC_ACL
ZFS_AC_KERNEL_SRC_INODE_GETATTR
ZFS_AC_KERNEL_SRC_INODE_SET_FLAGS
ZFS_AC_KERNEL_SRC_INODE_SET_IVERSION
ZFS_AC_KERNEL_SRC_SHOW_OPTIONS
ZFS_AC_KERNEL_SRC_FILE_INODE
ZFS_AC_KERNEL_SRC_FILE_DENTRY
ZFS_AC_KERNEL_SRC_FSYNC
ZFS_AC_KERNEL_SRC_AIO_FSYNC
ZFS_AC_KERNEL_SRC_EVICT_INODE
ZFS_AC_KERNEL_SRC_DIRTY_INODE
ZFS_AC_KERNEL_SRC_SHRINKER
ZFS_AC_KERNEL_SRC_MKDIR
ZFS_AC_KERNEL_SRC_LOOKUP_FLAGS
ZFS_AC_KERNEL_SRC_CREATE
ZFS_AC_KERNEL_SRC_GET_LINK
ZFS_AC_KERNEL_SRC_PUT_LINK
ZFS_AC_KERNEL_SRC_TMPFILE
ZFS_AC_KERNEL_SRC_AUTOMOUNT
ZFS_AC_KERNEL_SRC_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_SRC_COMMIT_METADATA
ZFS_AC_KERNEL_SRC_CLEAR_INODE
ZFS_AC_KERNEL_SRC_SETATTR_PREPARE
ZFS_AC_KERNEL_SRC_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_SRC_DENTRY
ZFS_AC_KERNEL_SRC_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SRC_SECURITY_INODE
ZFS_AC_KERNEL_SRC_FST_MOUNT
ZFS_AC_KERNEL_SRC_BDI
ZFS_AC_KERNEL_SRC_SET_NLINK
ZFS_AC_KERNEL_SRC_SGET
ZFS_AC_KERNEL_SRC_LSEEK_EXECUTE
ZFS_AC_KERNEL_SRC_VFS_GETATTR
ZFS_AC_KERNEL_SRC_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_SRC_VFS_ITERATE
ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO
ZFS_AC_KERNEL_SRC_VFS_RW_ITERATE
ZFS_AC_KERNEL_SRC_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_SRC_VFS_IOV_ITER
ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN
ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT
ZFS_AC_KERNEL_SRC_FPU
ZFS_AC_KERNEL_SRC_FMODE_T
ZFS_AC_KERNEL_SRC_KUIDGID_T
ZFS_AC_KERNEL_SRC_KUID_HELPERS
ZFS_AC_KERNEL_SRC_MODULE_PARAM_CALL_CONST
ZFS_AC_KERNEL_SRC_RENAME
ZFS_AC_KERNEL_SRC_CURRENT_TIME
ZFS_AC_KERNEL_SRC_USERNS_CAPABILITIES
ZFS_AC_KERNEL_SRC_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_SRC_KTIME
ZFS_AC_KERNEL_SRC_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_SRC_TOTALHIGH_PAGES
ZFS_AC_KERNEL_SRC_KSTRTOUL
ZFS_AC_KERNEL_SRC_PERCPU
ZFS_AC_KERNEL_SRC_CPU_HOTPLUG
ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR_USERNS
ZFS_AC_KERNEL_SRC_MKNOD
ZFS_AC_KERNEL_SRC_SYMLINK
ZFS_AC_KERNEL_SRC_BIO_MAX_SEGS
ZFS_AC_KERNEL_SRC_SIGNAL_STOP
ZFS_AC_KERNEL_SRC_SIGINFO
ZFS_AC_KERNEL_SRC_SET_SPECIAL_STATE
+ ZFS_AC_KERNEL_SRC_VFS_SET_PAGE_DIRTY_NOBUFFERS
AC_MSG_CHECKING([for available kernel interfaces])
ZFS_LINUX_TEST_COMPILE_ALL([kabi])
AC_MSG_RESULT([done])
])
dnl #
dnl # Check results of kernel interface tests.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_ACCESS_OK_TYPE
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_OBJTOOL
ZFS_AC_KERNEL_PDE_DATA
ZFS_AC_KERNEL_FALLOCATE
ZFS_AC_KERNEL_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_RWSEM
ZFS_AC_KERNEL_SCHED
ZFS_AC_KERNEL_USLEEP_RANGE
ZFS_AC_KERNEL_KMEM_CACHE
ZFS_AC_KERNEL_KVMALLOC
ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_WAIT
ZFS_AC_KERNEL_INODE_TIMES
ZFS_AC_KERNEL_INODE_LOCK
ZFS_AC_KERNEL_GROUP_INFO_GID
ZFS_AC_KERNEL_RW
ZFS_AC_KERNEL_TIMER_SETUP
ZFS_AC_KERNEL_SUPER_USER_NS
ZFS_AC_KERNEL_PROC_OPERATIONS
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_BIO
ZFS_AC_KERNEL_BLKDEV
ZFS_AC_KERNEL_BLK_QUEUE
ZFS_AC_KERNEL_REVALIDATE_DISK
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_DISCARD_GRANULARITY
ZFS_AC_KERNEL_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_XATTR
ZFS_AC_KERNEL_ACL
ZFS_AC_KERNEL_INODE_GETATTR
ZFS_AC_KERNEL_INODE_SET_FLAGS
ZFS_AC_KERNEL_INODE_SET_IVERSION
ZFS_AC_KERNEL_SHOW_OPTIONS
ZFS_AC_KERNEL_FILE_INODE
ZFS_AC_KERNEL_FILE_DENTRY
ZFS_AC_KERNEL_FSYNC
ZFS_AC_KERNEL_AIO_FSYNC
ZFS_AC_KERNEL_EVICT_INODE
ZFS_AC_KERNEL_DIRTY_INODE
ZFS_AC_KERNEL_SHRINKER
ZFS_AC_KERNEL_MKDIR
ZFS_AC_KERNEL_LOOKUP_FLAGS
ZFS_AC_KERNEL_CREATE
ZFS_AC_KERNEL_GET_LINK
ZFS_AC_KERNEL_PUT_LINK
ZFS_AC_KERNEL_TMPFILE
ZFS_AC_KERNEL_AUTOMOUNT
ZFS_AC_KERNEL_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_COMMIT_METADATA
ZFS_AC_KERNEL_CLEAR_INODE
ZFS_AC_KERNEL_SETATTR_PREPARE
ZFS_AC_KERNEL_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_DENTRY
ZFS_AC_KERNEL_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SECURITY_INODE
ZFS_AC_KERNEL_FST_MOUNT
ZFS_AC_KERNEL_BDI
ZFS_AC_KERNEL_SET_NLINK
ZFS_AC_KERNEL_SGET
ZFS_AC_KERNEL_LSEEK_EXECUTE
ZFS_AC_KERNEL_VFS_GETATTR
ZFS_AC_KERNEL_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_VFS_ITERATE
ZFS_AC_KERNEL_VFS_DIRECT_IO
ZFS_AC_KERNEL_VFS_RW_ITERATE
ZFS_AC_KERNEL_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_VFS_IOV_ITER
ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_MAKE_REQUEST_FN
ZFS_AC_KERNEL_GENERIC_IO_ACCT
ZFS_AC_KERNEL_FPU
ZFS_AC_KERNEL_FMODE_T
ZFS_AC_KERNEL_KUIDGID_T
ZFS_AC_KERNEL_KUID_HELPERS
ZFS_AC_KERNEL_MODULE_PARAM_CALL_CONST
ZFS_AC_KERNEL_RENAME
ZFS_AC_KERNEL_CURRENT_TIME
ZFS_AC_KERNEL_USERNS_CAPABILITIES
ZFS_AC_KERNEL_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_KTIME
ZFS_AC_KERNEL_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_TOTALHIGH_PAGES
ZFS_AC_KERNEL_KSTRTOUL
ZFS_AC_KERNEL_PERCPU
ZFS_AC_KERNEL_CPU_HOTPLUG
ZFS_AC_KERNEL_GENERIC_FILLATTR_USERNS
ZFS_AC_KERNEL_MKNOD
ZFS_AC_KERNEL_SYMLINK
ZFS_AC_KERNEL_BIO_MAX_SEGS
ZFS_AC_KERNEL_SIGNAL_STOP
ZFS_AC_KERNEL_SIGINFO
ZFS_AC_KERNEL_SET_SPECIAL_STATE
+ ZFS_AC_KERNEL_VFS_SET_PAGE_DIRTY_NOBUFFERS
])
dnl #
dnl # Detect name used for Module.symvers file in kernel
dnl #
AC_DEFUN([ZFS_AC_MODULE_SYMVERS], [
modpost=$LINUX/scripts/Makefile.modpost
AC_MSG_CHECKING([kernel file name for module symbols])
AS_IF([test "x$enable_linux_builtin" != xyes -a -f "$modpost"], [
AS_IF([grep -q Modules.symvers $modpost], [
LINUX_SYMBOLS=Modules.symvers
], [
LINUX_SYMBOLS=Module.symvers
])
AS_IF([test ! -f "$LINUX_OBJ/$LINUX_SYMBOLS"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed. If you are building with a custom kernel, make sure
*** the kernel is configured, built, and the '--with-linux=PATH'
*** configure option refers to the location of the kernel source.
])
])
], [
LINUX_SYMBOLS=NONE
])
AC_MSG_RESULT($LINUX_SYMBOLS)
AC_SUBST(LINUX_SYMBOLS)
])
dnl #
dnl # Detect the kernel to be built against
dnl #
AC_DEFUN([ZFS_AC_KERNEL], [
AC_ARG_WITH([linux],
AS_HELP_STRING([--with-linux=PATH],
[Path to kernel source]),
[kernelsrc="$withval"])
AC_ARG_WITH(linux-obj,
AS_HELP_STRING([--with-linux-obj=PATH],
[Path to kernel build objects]),
[kernelbuild="$withval"])
AC_MSG_CHECKING([kernel source directory])
AS_IF([test -z "$kernelsrc"], [
AS_IF([test -e "/lib/modules/$(uname -r)/source"], [
headersdir="/lib/modules/$(uname -r)/source"
sourcelink=$(readlink -f "$headersdir")
], [test -e "/lib/modules/$(uname -r)/build"], [
headersdir="/lib/modules/$(uname -r)/build"
sourcelink=$(readlink -f "$headersdir")
], [
sourcelink=$(ls -1d /usr/src/kernels/* \
/usr/src/linux-* \
2>/dev/null | grep -v obj | tail -1)
])
AS_IF([test -n "$sourcelink" && test -e ${sourcelink}], [
kernelsrc=`readlink -f ${sourcelink}`
], [
kernelsrc="[Not found]"
])
], [
AS_IF([test "$kernelsrc" = "NONE"], [
kernsrcver=NONE
])
withlinux=yes
])
AC_MSG_RESULT([$kernelsrc])
AS_IF([test ! -d "$kernelsrc"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed and then try again. If that fails, you can specify the
*** location of the kernel source with the '--with-linux=PATH' option.])
])
AC_MSG_CHECKING([kernel build directory])
AS_IF([test -z "$kernelbuild"], [
AS_IF([test x$withlinux != xyes -a -e "/lib/modules/$(uname -r)/build"], [
kernelbuild=`readlink -f /lib/modules/$(uname -r)/build`
], [test -d ${kernelsrc}-obj/${target_cpu}/${target_cpu}], [
kernelbuild=${kernelsrc}-obj/${target_cpu}/${target_cpu}
], [test -d ${kernelsrc}-obj/${target_cpu}/default], [
kernelbuild=${kernelsrc}-obj/${target_cpu}/default
], [test -d `dirname ${kernelsrc}`/build-${target_cpu}], [
kernelbuild=`dirname ${kernelsrc}`/build-${target_cpu}
], [
kernelbuild=${kernelsrc}
])
])
AC_MSG_RESULT([$kernelbuild])
AC_MSG_CHECKING([kernel source version])
utsrelease1=$kernelbuild/include/linux/version.h
utsrelease2=$kernelbuild/include/linux/utsrelease.h
utsrelease3=$kernelbuild/include/generated/utsrelease.h
AS_IF([test -r $utsrelease1 && fgrep -q UTS_RELEASE $utsrelease1], [
utsrelease=$utsrelease1
], [test -r $utsrelease2 && fgrep -q UTS_RELEASE $utsrelease2], [
utsrelease=$utsrelease2
], [test -r $utsrelease3 && fgrep -q UTS_RELEASE $utsrelease3], [
utsrelease=$utsrelease3
])
AS_IF([test -n "$utsrelease"], [
kernsrcver=$($AWK '/UTS_RELEASE/ { gsub(/"/, "", $[3]); print $[3] }' $utsrelease)
AS_IF([test -z "$kernsrcver"], [
AC_MSG_RESULT([Not found])
AC_MSG_ERROR([
*** Cannot determine kernel version.
])
])
], [
AC_MSG_RESULT([Not found])
if test "x$enable_linux_builtin" != xyes; then
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
])
else
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
*** Please run 'make prepare' inside the kernel source tree.])
fi
])
AC_MSG_RESULT([$kernsrcver])
AS_VERSION_COMPARE([$kernsrcver], [$ZFS_META_KVER_MIN], [
AC_MSG_ERROR([
*** Cannot build against kernel version $kernsrcver.
*** The minimum supported kernel version is $ZFS_META_KVER_MIN.
])
])
LINUX=${kernelsrc}
LINUX_OBJ=${kernelbuild}
LINUX_VERSION=${kernsrcver}
AC_SUBST(LINUX)
AC_SUBST(LINUX_OBJ)
AC_SUBST(LINUX_VERSION)
ZFS_AC_MODULE_SYMVERS
])
dnl #
dnl # Detect the QAT module to be built against. QAT provides hardware
dnl # acceleration for data compression:
dnl #
dnl # https://01.org/intel-quickassist-technology
dnl #
dnl # 1) Download and install QAT driver from the above link
dnl # 2) Start QAT driver in your system:
dnl # service qat_service start
dnl # 3) Enable QAT in ZFS, e.g.:
dnl # ./configure --with-qat=<qat-driver-path>/QAT1.6
dnl # make
dnl # 4) Set GZIP compression in ZFS dataset:
dnl # zfs set compression=gzip <dataset>
dnl #
dnl # Then the data written to this ZFS pool is compressed by the QAT accelerator
dnl # automatically, and decompressed by QAT when read from the pool.
dnl #
dnl # 1) Get QAT hardware statistics with:
dnl # cat /proc/icp_dh895xcc_dev/qat
dnl # 2) To disable QAT:
dnl # insmod zfs.ko zfs_qat_disable=1
dnl #
AC_DEFUN([ZFS_AC_QAT], [
AC_ARG_WITH([qat],
AS_HELP_STRING([--with-qat=PATH],
[Path to qat source]),
AS_IF([test "$withval" = "yes"],
AC_MSG_ERROR([--with-qat=PATH requires a PATH]),
[qatsrc="$withval"]))
AC_ARG_WITH([qat-obj],
AS_HELP_STRING([--with-qat-obj=PATH],
[Path to qat build objects]),
[qatbuild="$withval"])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat source directory])
AC_MSG_RESULT([$qatsrc])
QAT_SRC="${qatsrc}/quickassist"
AS_IF([ test ! -e "$QAT_SRC/include/cpa.h"], [
AC_MSG_ERROR([
*** Please make sure the qat driver package is installed
*** and specify the location of the qat source with the
*** '--with-qat=PATH' option then try again. Failed to
*** find cpa.h in:
${QAT_SRC}/include])
])
])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat build directory])
AS_IF([test -z "$qatbuild"], [
qatbuild="${qatsrc}/build"
])
AC_MSG_RESULT([$qatbuild])
QAT_OBJ=${qatbuild}
AS_IF([ ! test -e "$QAT_OBJ/icp_qa_al.ko" && ! test -e "$QAT_OBJ/qat_api.ko"], [
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find icp_qa_al.ko or qat_api.ko in:
$QAT_OBJ])
])
AC_SUBST(QAT_SRC)
AC_SUBST(QAT_OBJ)
AC_DEFINE(HAVE_QAT, 1,
[qat is enabled and exists])
])
dnl #
dnl # Detect the name used for the QAT Module.symvers file.
dnl #
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat file for module symbols])
QAT_SYMBOLS=$QAT_SRC/lookaside/access_layer/src/Module.symvers
AS_IF([test -r $QAT_SYMBOLS], [
AC_MSG_RESULT([$QAT_SYMBOLS])
AC_SUBST(QAT_SYMBOLS)
],[
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find Module.symvers in:
$QAT_SYMBOLS
])
])
])
])
dnl #
dnl # Basic toolchain sanity check.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_MODULE], [
AC_MSG_CHECKING([whether modules can be built])
ZFS_LINUX_TRY_COMPILE([], [], [
AC_MSG_RESULT([yes])
],[
AC_MSG_RESULT([no])
if test "x$enable_linux_builtin" != xyes; then
AC_MSG_ERROR([
*** Unable to build an empty module.
])
else
AC_MSG_ERROR([
*** Unable to build an empty module.
*** Please run 'make scripts' inside the kernel source tree.])
fi
])
])
dnl #
dnl # ZFS_LINUX_CONFTEST_H
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_H], [
test -d build/$2 || mkdir -p build/$2
cat - <<_ACEOF >build/$2/$2.h
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_C
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_C], [
test -d build/$2 || mkdir -p build/$2
cat confdefs.h - <<_ACEOF >build/$2/$2.c
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_MAKEFILE
dnl #
dnl # $1 - test case name
dnl # $2 - add to top-level Makefile
dnl # $3 - additional build flags
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_MAKEFILE], [
test -d build || mkdir -p build
test -d build/$1 || mkdir -p build/$1
file=build/$1/Makefile
dnl # Example command line to manually build source.
cat - <<_ACEOF >$file
# Example command line to manually build source
# make modules -C $LINUX_OBJ $ARCH_UM M=$PWD/build/$1
ccflags-y := -Werror $FRAME_LARGER_THAN
_ACEOF
dnl # Additional custom CFLAGS as requested.
m4_ifval($3, [echo "ccflags-y += $3" >>$file], [])
dnl # Test case source
echo "obj-m := $1.o" >>$file
AS_IF([test "x$2" = "xyes"], [echo "obj-m += $1/" >>build/Makefile], [])
])
dnl #
dnl # ZFS_LINUX_TEST_PROGRAM(C)([PROLOGUE], [BODY])
dnl #
m4_define([ZFS_LINUX_TEST_PROGRAM], [
#include <linux/module.h>
$1
int
main (void)
{
$2
;
return 0;
}
MODULE_DESCRIPTION("conftest");
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
MODULE_LICENSE($3);
])
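dnl #
dnl # For illustration, a call such as
dnl # ZFS_LINUX_TEST_PROGRAM([[#include <linux/fs.h>]],
dnl # [[(void) iterate_dir(NULL, NULL);]], [["Dual BSD/GPL"]])
dnl # expands to a conftest source roughly like the sketch below; the
dnl # include and the body are hypothetical stand-ins, not one of the
dnl # real checks defined in this file.
dnl #
dnl # #include <linux/module.h>
dnl # #include <linux/fs.h>
dnl #
dnl # int
dnl # main (void)
dnl # {
dnl # (void) iterate_dir(NULL, NULL);
dnl # ;
dnl # return 0;
dnl # }
dnl #
dnl # MODULE_DESCRIPTION("conftest");
dnl # MODULE_AUTHOR(ZFS_META_AUTHOR);
dnl # MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
dnl # MODULE_LICENSE("Dual BSD/GPL");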
dnl #
dnl # ZFS_LINUX_TEST_REMOVE
dnl #
dnl # Removes the specified test source and results.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_REMOVE], [
test -d build/$1 && rm -Rf build/$1
test -f build/Makefile && sed '/$1/d' build/Makefile
])
dnl #
dnl # ZFS_LINUX_COMPILE
dnl #
dnl # $1 - build dir
dnl # $2 - test command
dnl # $3 - pass command
dnl # $4 - fail command
dnl # $5 - set KBUILD_MODPOST_NOFINAL='yes'
dnl # $6 - set KBUILD_MODPOST_WARN='yes'
dnl #
dnl # Used internally by ZFS_LINUX_TEST_{COMPILE,MODPOST}
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE], [
AC_TRY_COMMAND([
KBUILD_MODPOST_NOFINAL="$5" KBUILD_MODPOST_WARN="$6"
make modules -k -j$TEST_JOBS -C $LINUX_OBJ $ARCH_UM
M=$PWD/$1 >$1/build.log 2>&1])
AS_IF([AC_TRY_COMMAND([$2])], [$3], [$4])
])
dnl #
dnl # ZFS_LINUX_TEST_COMPILE
dnl #
dnl # Perform a full compile excluding the final modpost phase.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.compile.$1
mv $2/build.log $2/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to compile test source to determine kernel interfaces.])
], [yes], [])
])
dnl #
dnl # ZFS_LINUX_TEST_MODPOST
dnl #
dnl # Perform a full compile including the modpost phase. This may
dnl # be an incremental build if the objects have already been built.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_MODPOST], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.modpost.$1
cat $2/build.log >>build/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to modpost test source to determine kernel interfaces.])
], [], [yes])
])
dnl #
dnl # Perform the compilation of the test cases in two phases.
dnl #
dnl # Phase 1) attempt to build the object files for all of the tests
dnl # defined by the ZFS_LINUX_TEST_SRC macro. But do not
dnl # perform the final modpost stage.
dnl #
dnl # Phase 2) disable all tests which failed the initial compilation,
dnl # then invoke the final modpost step for the remaining tests.
dnl #
dnl # This allows us to efficiently build the test cases in parallel while
dnl # remaining resilient to build failures which are expected when
dnl # detecting the available kernel interfaces.
dnl #
dnl # The maximum allowed parallelism can be controlled by setting the
dnl # TEST_JOBS environment variable. Otherwise, it defaults to $(nproc).
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE_ALL], [
dnl # Phase 1 - Compilation only, final linking is skipped.
ZFS_LINUX_TEST_COMPILE([$1], [build])
dnl #
dnl # Phase 2 - When building external modules disable test cases
dnl # which failed to compile and invoke modpost to verify the
dnl # final linking.
dnl #
dnl # Test names suffixed with '_license' call modpost independently
dnl # to ensure that a single incompatibility does not result in the
dnl # modpost phase exiting early. This check is not performed on
dnl # every symbol since the majority are compatible and doing so
dnl # would significantly slow down this phase.
dnl #
dnl # When configuring for builtin (--enable-linux-builtin)
dnl # fake the linking step and artificially create the expected .ko
dnl # files for tests which did compile. This is required for
dnl # kernels which do not have loadable module support or have
dnl # not yet been built.
dnl #
AS_IF([test "x$enable_linux_builtin" = "xno"], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
AS_IF([test "${name##*_}" = "license"], [
ZFS_LINUX_TEST_MODPOST([$1],
[build/$name])
echo "obj-n += $dir" >>build/Makefile
], [
echo "obj-m += $dir" >>build/Makefile
])
], [
echo "obj-n += $dir" >>build/Makefile
])
done
ZFS_LINUX_TEST_MODPOST([$1], [build])
], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
touch build/$name/$name.ko
])
done
])
])
dnl #
dnl # ZFS_LINUX_TEST_SRC
dnl #
dnl # $1 - name
dnl # $2 - global
dnl # $3 - source
dnl # $4 - extra cflags
dnl # $5 - check license-compatibility
dnl #
dnl # Check if the test source is buildable at all and then if it is
dnl # license compatible.
dnl #
dnl # N.B. because all of the test cases are compiled in parallel they
dnl # must never depend on the results of previous tests. Each test
dnl # needs to be entirely independent.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_SRC], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM([[$2]], [[$3]],
[["Dual BSD/GPL"]])], [$1])
ZFS_LINUX_CONFTEST_MAKEFILE([$1], [yes], [$4])
AS_IF([ test -n "$5" ], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM(
[[$2]], [[$3]], [[$5]])], [$1_license])
ZFS_LINUX_CONFTEST_MAKEFILE([$1_license], [yes], [$4])
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT
dnl #
dnl # $1 - name of a test source (ZFS_LINUX_TEST_SRC)
dnl # $2 - run on success (valid .ko generated)
dnl # $3 - run on failure (unable to compile)
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT], [
AS_IF([test -d build/$1], [
AS_IF([test -f build/$1/$1.ko], [$2], [$3])
], [
AC_MSG_ERROR([
*** No matching source for the "$1" test, check that
*** both the test source and result macros refer to the same name.
])
])
])
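dnl #
dnl # Typical usage of the two macros above (the test name, include, and
dnl # body here are hypothetical, for illustration only):
dnl #
dnl # ZFS_LINUX_TEST_SRC([example_symbol], [
dnl # #include <linux/fs.h>
dnl # ], [
dnl # (void) iterate_dir(NULL, NULL);
dnl # ])
dnl #
dnl # ...and, after ZFS_LINUX_TEST_COMPILE_ALL has run...
dnl #
dnl # ZFS_LINUX_TEST_RESULT([example_symbol], [
dnl # AC_DEFINE(HAVE_EXAMPLE_SYMBOL, 1, [example_symbol() is available])
dnl # ], [
dnl # ZFS_LINUX_TEST_ERROR([example_symbol])
dnl # ])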
dnl #
dnl # ZFS_LINUX_TEST_ERROR
dnl #
dnl # Generic error message which can be used when none of the expected
dnl # kernel interfaces were detected.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_ERROR], [
AC_MSG_ERROR([
*** None of the expected "$1" interfaces were detected.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications.
***
*** ZFS Version: $ZFS_META_ALIAS
*** Compatible Kernels: $ZFS_META_KVER_MIN - $ZFS_META_KVER_MAX
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TEST_RESULT except ZFS_CHECK_SYMBOL_EXPORT is called to
dnl # verify symbol exports, unless --enable-linux-builtin was provided to
dnl # configure.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT_SYMBOL], [
AS_IF([ ! test -f build/$1/$1.ko], [
$5
], [
AS_IF([test "x$enable_linux_builtin" != "xyes"], [
ZFS_CHECK_SYMBOL_EXPORT([$2], [$3], [$4], [$5])
], [
$4
])
])
])
dnl #
dnl # ZFS_LINUX_COMPILE_IFELSE
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE_IFELSE], [
ZFS_LINUX_TEST_REMOVE([conftest])
m4_ifvaln([$1], [ZFS_LINUX_CONFTEST_C([$1], [conftest])])
m4_ifvaln([$5], [ZFS_LINUX_CONFTEST_H([$5], [conftest])],
[ZFS_LINUX_CONFTEST_H([], [conftest])])
ZFS_LINUX_CONFTEST_MAKEFILE([conftest], [no],
[m4_ifvaln([$5], [-I$PWD/build/conftest], [])])
ZFS_LINUX_COMPILE([build/conftest], [$2], [$3], [$4], [], [])
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE
dnl #
dnl # $1 - global
dnl # $2 - source
dnl # $3 - run on success (valid .ko generated)
dnl # $4 - run on failure (unable to compile)
dnl #
dnl # When configuring as builtin (--enable-linux-builtin) for kernels
dnl # without loadable module support (CONFIG_MODULES=n) only the object
dnl # file is created. See ZFS_LINUX_TEST_COMPILE_ALL for details.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4])
], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4])
])
])
dnl #
dnl # ZFS_CHECK_SYMBOL_EXPORT
dnl #
dnl # Check if a symbol is exported or not by consulting the symbols
dnl # file, or optionally the source code.
dnl #
AC_DEFUN([ZFS_CHECK_SYMBOL_EXPORT], [
grep -q -E '[[[:space:]]]$1[[[:space:]]]' \
$LINUX_OBJ/$LINUX_SYMBOLS 2>/dev/null
rc=$?
if test $rc -ne 0; then
export=0
for file in $2; do
grep -q -E "EXPORT_SYMBOL.*($1)" \
"$LINUX/$file" 2>/dev/null
rc=$?
if test $rc -eq 0; then
export=1
break;
fi
done
if test $export -eq 0; then :
$4
else :
$3
fi
else :
$3
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TRY_COMPILE except ZFS_CHECK_SYMBOL_EXPORT is called
dnl # to verify symbol exports, unless --enable-linux-builtin was provided
dnl # to configure.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_SYMBOL], [
ZFS_LINUX_TRY_COMPILE([$1], [$2], [rc=0], [rc=1])
if test $rc -ne 0; then :
$6
else
if test "x$enable_linux_builtin" != xyes; then
ZFS_CHECK_SYMBOL_EXPORT([$3], [$4], [rc=0], [rc=1])
fi
if test $rc -ne 0; then :
$6
else :
$5
fi
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_HEADER
dnl # like ZFS_LINUX_TRY_COMPILE, except the contents of conftest.h are
dnl # provided via the fifth parameter
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_HEADER], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]], [[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko],
[$3], [$4], [$5])
])
diff --git a/sys/contrib/openzfs/include/libuutil.h b/sys/contrib/openzfs/include/libuutil.h
index 1d179945cca1..cadc20d2d8f3 100644
--- a/sys/contrib/openzfs/include/libuutil.h
+++ b/sys/contrib/openzfs/include/libuutil.h
@@ -1,356 +1,359 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#ifndef _LIBUUTIL_H
#define _LIBUUTIL_H
#include <sys/types.h>
#include <stdarg.h>
#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Standard flags codes.
*/
#define UU_DEFAULT 0
/*
* Standard error codes.
*/
#define UU_ERROR_NONE 0 /* no error */
#define UU_ERROR_INVALID_ARGUMENT 1 /* invalid argument */
#define UU_ERROR_UNKNOWN_FLAG 2 /* passed flag invalid */
#define UU_ERROR_NO_MEMORY 3 /* out of memory */
#define UU_ERROR_CALLBACK_FAILED 4 /* callback-initiated error */
#define UU_ERROR_NOT_SUPPORTED 5 /* operation not supported */
#define UU_ERROR_EMPTY 6 /* no value provided */
#define UU_ERROR_UNDERFLOW 7 /* value is too small */
#define UU_ERROR_OVERFLOW 8 /* value is too large */
#define UU_ERROR_INVALID_CHAR 9 /* value contains unexpected char */
#define UU_ERROR_INVALID_DIGIT 10 /* value contains digit not in base */
#define UU_ERROR_SYSTEM 99 /* underlying system error */
#define UU_ERROR_UNKNOWN 100 /* error status not known */
/*
* Standard program exit codes.
*/
#define UU_EXIT_OK (*(uu_exit_ok()))
#define UU_EXIT_FATAL (*(uu_exit_fatal()))
#define UU_EXIT_USAGE (*(uu_exit_usage()))
/*
* Exit status profiles.
*/
#define UU_PROFILE_DEFAULT 0
#define UU_PROFILE_LAUNCHER 1
/*
* Error reporting functions.
*/
uint32_t uu_error(void);
const char *uu_strerror(uint32_t);
/*
* Program notification functions.
*/
extern void uu_alt_exit(int);
extern const char *uu_setpname(char *);
extern const char *uu_getpname(void);
-/*PRINTFLIKE1*/
-extern void uu_warn(const char *, ...);
-extern void uu_vwarn(const char *, va_list);
-/*PRINTFLIKE1*/
-extern void uu_die(const char *, ...) __NORETURN;
-extern void uu_vdie(const char *, va_list) __NORETURN;
-/*PRINTFLIKE2*/
-extern void uu_xdie(int, const char *, ...) __NORETURN;
-extern void uu_vxdie(int, const char *, va_list) __NORETURN;
+extern void uu_warn(const char *, ...)
+ __attribute__((format(printf, 1, 2)));
+extern void uu_vwarn(const char *, va_list)
+ __attribute__((format(printf, 1, 0)));
+extern void uu_die(const char *, ...)
+ __attribute__((format(printf, 1, 2))) __NORETURN;
+extern void uu_vdie(const char *, va_list)
+ __attribute__((format(printf, 1, 0))) __NORETURN;
+extern void uu_xdie(int, const char *, ...)
+ __attribute__((format(printf, 2, 3))) __NORETURN;
+extern void uu_vxdie(int, const char *, va_list)
+ __attribute__((format(printf, 2, 0))) __NORETURN;
/*
* Exit status functions (not to be used directly)
*/
extern int *uu_exit_ok(void);
extern int *uu_exit_fatal(void);
extern int *uu_exit_usage(void);
/*
* Identifier test flags and function.
*/
#define UU_NAME_DOMAIN 0x1 /* allow SUNW, or com.sun, prefix */
#define UU_NAME_PATH 0x2 /* allow '/'-delimited paths */
int uu_check_name(const char *, uint_t);
/*
* Convenience functions.
*/
#define UU_NELEM(a) (sizeof (a) / sizeof ((a)[0]))
-/*PRINTFLIKE1*/
-extern char *uu_msprintf(const char *format, ...);
+extern char *uu_msprintf(const char *format, ...)
+ __attribute__((format(printf, 1, 2)));
extern void *uu_zalloc(size_t);
extern char *uu_strdup(const char *);
extern void uu_free(void *);
extern boolean_t uu_strcaseeq(const char *a, const char *b);
extern boolean_t uu_streq(const char *a, const char *b);
extern char *uu_strndup(const char *s, size_t n);
extern boolean_t uu_strbw(const char *a, const char *b);
extern void *uu_memdup(const void *buf, size_t sz);
/*
* Comparison function type definition.
* Developers should be careful in their use of the _private argument. If you
* break interface guarantees, you get undefined behavior.
*/
typedef int uu_compare_fn_t(const void *__left, const void *__right,
void *__private);
/*
* Walk variant flags.
* A data structure need not provide support for all variants and
* combinations. Refer to the appropriate documentation.
*/
#define UU_WALK_ROBUST 0x00000001 /* walk can survive removes */
#define UU_WALK_REVERSE 0x00000002 /* reverse walk order */
#define UU_WALK_PREORDER 0x00000010 /* walk tree in pre-order */
#define UU_WALK_POSTORDER 0x00000020 /* walk tree in post-order */
/*
* Walk callback function return codes.
*/
#define UU_WALK_ERROR -1
#define UU_WALK_NEXT 0
#define UU_WALK_DONE 1
/*
* Walk callback function type definition.
*/
typedef int uu_walk_fn_t(void *_elem, void *_private);
/*
* lists: opaque structures
*/
typedef struct uu_list_pool uu_list_pool_t;
typedef struct uu_list uu_list_t;
typedef struct uu_list_node {
uintptr_t uln_opaque[2];
} uu_list_node_t;
typedef struct uu_list_walk uu_list_walk_t;
typedef uintptr_t uu_list_index_t;
/*
* lists: interface
*
* basic usage:
* typedef struct foo {
* ...
* uu_list_node_t foo_node;
* ...
* } foo_t;
*
* static int
* foo_compare(void *l_arg, void *r_arg, void *private)
* {
* foo_t *l = l_arg;
* foo_t *r = r_arg;
*
* if (... l greater than r ...)
* return (1);
* if (... l less than r ...)
* return (-1);
* return (0);
* }
*
* ...
* // at initialization time
* foo_pool = uu_list_pool_create("foo_pool",
* sizeof (foo_t), offsetof(foo_t, foo_node), foo_compare,
* debugging? 0 : UU_LIST_POOL_DEBUG);
* ...
*/
uu_list_pool_t *uu_list_pool_create(const char *, size_t, size_t,
uu_compare_fn_t *, uint32_t);
#define UU_LIST_POOL_DEBUG 0x00000001
void uu_list_pool_destroy(uu_list_pool_t *);
/*
* usage:
*
* foo_t *a;
* a = malloc(sizeof (*a));
* uu_list_node_init(a, &a->foo_list, pool);
* ...
* uu_list_node_fini(a, &a->foo_list, pool);
* free(a);
*/
void uu_list_node_init(void *, uu_list_node_t *, uu_list_pool_t *);
void uu_list_node_fini(void *, uu_list_node_t *, uu_list_pool_t *);
uu_list_t *uu_list_create(uu_list_pool_t *, void *_parent, uint32_t);
#define UU_LIST_DEBUG 0x00000001
#define UU_LIST_SORTED 0x00000002 /* list is sorted */
void uu_list_destroy(uu_list_t *); /* list must be empty */
size_t uu_list_numnodes(uu_list_t *);
void *uu_list_first(uu_list_t *);
void *uu_list_last(uu_list_t *);
void *uu_list_next(uu_list_t *, void *);
void *uu_list_prev(uu_list_t *, void *);
int uu_list_walk(uu_list_t *, uu_walk_fn_t *, void *, uint32_t);
uu_list_walk_t *uu_list_walk_start(uu_list_t *, uint32_t);
void *uu_list_walk_next(uu_list_walk_t *);
void uu_list_walk_end(uu_list_walk_t *);
void *uu_list_find(uu_list_t *, void *, void *, uu_list_index_t *);
void uu_list_insert(uu_list_t *, void *, uu_list_index_t);
void *uu_list_nearest_next(uu_list_t *, uu_list_index_t);
void *uu_list_nearest_prev(uu_list_t *, uu_list_index_t);
void *uu_list_teardown(uu_list_t *, void **);
void uu_list_remove(uu_list_t *, void *);
/*
* lists: interfaces for non-sorted lists only
*/
int uu_list_insert_before(uu_list_t *, void *_target, void *_elem);
int uu_list_insert_after(uu_list_t *, void *_target, void *_elem);
/*
* avl trees: opaque structures
*/
typedef struct uu_avl_pool uu_avl_pool_t;
typedef struct uu_avl uu_avl_t;
typedef struct uu_avl_node {
#ifdef _LP64
uintptr_t uan_opaque[3];
#else
uintptr_t uan_opaque[4];
#endif
} uu_avl_node_t;
typedef struct uu_avl_walk uu_avl_walk_t;
typedef uintptr_t uu_avl_index_t;
/*
* avl trees: interface
*
* basic usage:
* typedef struct foo {
* ...
* uu_avl_node_t foo_node;
* ...
* } foo_t;
*
* static int
* foo_compare(void *l_arg, void *r_arg, void *private)
* {
* foo_t *l = l_arg;
* foo_t *r = r_arg;
*
* if (... l greater than r ...)
* return (1);
* if (... l less than r ...)
* return (-1);
* return (0);
* }
*
* ...
* // at initialization time
* foo_pool = uu_avl_pool_create("foo_pool",
* sizeof (foo_t), offsetof(foo_t, foo_node), foo_compare,
* debugging? 0 : UU_AVL_POOL_DEBUG);
* ...
*/
uu_avl_pool_t *uu_avl_pool_create(const char *, size_t, size_t,
uu_compare_fn_t *, uint32_t);
#define UU_AVL_POOL_DEBUG 0x00000001
void uu_avl_pool_destroy(uu_avl_pool_t *);
/*
* usage:
*
* foo_t *a;
* a = malloc(sizeof (*a));
* uu_avl_node_init(a, &a->foo_avl, pool);
* ...
* uu_avl_node_fini(a, &a->foo_avl, pool);
* free(a);
*/
void uu_avl_node_init(void *, uu_avl_node_t *, uu_avl_pool_t *);
void uu_avl_node_fini(void *, uu_avl_node_t *, uu_avl_pool_t *);
uu_avl_t *uu_avl_create(uu_avl_pool_t *, void *_parent, uint32_t);
#define UU_AVL_DEBUG 0x00000001
void uu_avl_destroy(uu_avl_t *); /* tree must be empty */
size_t uu_avl_numnodes(uu_avl_t *);
void *uu_avl_first(uu_avl_t *);
void *uu_avl_last(uu_avl_t *);
void *uu_avl_next(uu_avl_t *, void *);
void *uu_avl_prev(uu_avl_t *, void *);
int uu_avl_walk(uu_avl_t *, uu_walk_fn_t *, void *, uint32_t);
uu_avl_walk_t *uu_avl_walk_start(uu_avl_t *, uint32_t);
void *uu_avl_walk_next(uu_avl_walk_t *);
void uu_avl_walk_end(uu_avl_walk_t *);
void *uu_avl_find(uu_avl_t *, void *, void *, uu_avl_index_t *);
void uu_avl_insert(uu_avl_t *, void *, uu_avl_index_t);
void *uu_avl_nearest_next(uu_avl_t *, uu_avl_index_t);
void *uu_avl_nearest_prev(uu_avl_t *, uu_avl_index_t);
void *uu_avl_teardown(uu_avl_t *, void **);
void uu_avl_remove(uu_avl_t *, void *);
#ifdef __cplusplus
}
#endif
#endif /* _LIBUUTIL_H */
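For context, a rough userland sketch of how the avl interface above is typically consumed, assuming a hypothetical foo_t keyed by an integer, the usual illumos convention that uu_avl_find() fills in an index which uu_avl_insert() then consumes, and that uu_avl_teardown() hands back each element in turn for cleanup. This is an illustration compiled against libuutil, not code from this diff:
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <libuutil.h>

typedef struct foo {
	int foo_key;
	uu_avl_node_t foo_node;
} foo_t;

static int
foo_compare(const void *l_arg, const void *r_arg, void *priv)
{
	const foo_t *l = l_arg;
	const foo_t *r = r_arg;

	(void) priv;
	if (l->foo_key > r->foo_key)
		return (1);
	if (l->foo_key < r->foo_key)
		return (-1);
	return (0);
}

int
main(void)
{
	uu_avl_pool_t *pool = uu_avl_pool_create("foo_pool", sizeof (foo_t),
	    offsetof(foo_t, foo_node), foo_compare, UU_DEFAULT);
	uu_avl_t *avl = uu_avl_create(pool, NULL, UU_DEFAULT);
	void *cookie = NULL;
	foo_t *f;

	/* Insert a few elements using the find-then-insert index pattern. */
	for (int i = 0; i < 3; i++) {
		uu_avl_index_t idx;

		f = malloc(sizeof (*f));
		f->foo_key = i;
		uu_avl_node_init(f, &f->foo_node, pool);
		if (uu_avl_find(avl, f, NULL, &idx) == NULL)
			uu_avl_insert(avl, f, idx);
	}

	/* In-order walk with first/next. */
	for (f = uu_avl_first(avl); f != NULL; f = uu_avl_next(avl, f))
		printf("%d\n", f->foo_key);

	/* Tear down: each element is handed back exactly once. */
	while ((f = uu_avl_teardown(avl, &cookie)) != NULL) {
		uu_avl_node_fini(f, &f->foo_node, pool);
		free(f);
	}
	uu_avl_destroy(avl);
	uu_avl_pool_destroy(pool);
	return (0);
}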
diff --git a/sys/contrib/openzfs/include/libuutil_impl.h b/sys/contrib/openzfs/include/libuutil_impl.h
index 50d8e012d5f2..753bbff2461d 100644
--- a/sys/contrib/openzfs/include/libuutil_impl.h
+++ b/sys/contrib/openzfs/include/libuutil_impl.h
@@ -1,175 +1,174 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _LIBUUTIL_IMPL_H
#define _LIBUUTIL_IMPL_H
#include <libuutil.h>
#include <pthread.h>
#include <sys/avl_impl.h>
#include <sys/byteorder.h>
#ifdef __cplusplus
extern "C" {
#endif
void uu_set_error(uint_t);
-/*PRINTFLIKE1*/
-void uu_panic(const char *format, ...);
+void uu_panic(const char *format, ...) __attribute__((format(printf, 1, 2)));
/*
* For debugging purposes, libuutil keeps around linked lists of all uu_lists
* and uu_avls, along with pointers to their parents. These can cause false
* negatives when looking for memory leaks, so we encode the pointers by
* storing them with swapped endianness; this is not perfect, but it's about
* the best we can do without wasting a lot of space.
*/
#ifdef _LP64
#define UU_PTR_ENCODE(ptr) BSWAP_64((uintptr_t)(void *)(ptr))
#else
#define UU_PTR_ENCODE(ptr) BSWAP_32((uintptr_t)(void *)(ptr))
#endif
#define UU_PTR_DECODE(ptr) ((void *)UU_PTR_ENCODE(ptr))
/*
* uu_list structures
*/
typedef struct uu_list_node_impl {
struct uu_list_node_impl *uln_next;
struct uu_list_node_impl *uln_prev;
} uu_list_node_impl_t;
struct uu_list_walk {
uu_list_walk_t *ulw_next;
uu_list_walk_t *ulw_prev;
uu_list_t *ulw_list;
int8_t ulw_dir;
uint8_t ulw_robust;
uu_list_node_impl_t *ulw_next_result;
};
struct uu_list {
uintptr_t ul_next_enc;
uintptr_t ul_prev_enc;
uu_list_pool_t *ul_pool;
uintptr_t ul_parent_enc; /* encoded parent pointer */
size_t ul_offset;
size_t ul_numnodes;
uint8_t ul_debug;
uint8_t ul_sorted;
uint8_t ul_index; /* mark for uu_list_index_ts */
uu_list_node_impl_t ul_null_node;
uu_list_walk_t ul_null_walk; /* for robust walkers */
};
#define UU_LIST_PTR(ptr) ((uu_list_t *)UU_PTR_DECODE(ptr))
#define UU_LIST_POOL_MAXNAME 64
struct uu_list_pool {
uu_list_pool_t *ulp_next;
uu_list_pool_t *ulp_prev;
char ulp_name[UU_LIST_POOL_MAXNAME];
size_t ulp_nodeoffset;
size_t ulp_objsize;
uu_compare_fn_t *ulp_cmp;
uint8_t ulp_debug;
uint8_t ulp_last_index;
pthread_mutex_t ulp_lock; /* protects null_list */
uu_list_t ulp_null_list;
};
/*
* uu_avl structures
*/
typedef struct avl_node uu_avl_node_impl_t;
struct uu_avl_walk {
uu_avl_walk_t *uaw_next;
uu_avl_walk_t *uaw_prev;
uu_avl_t *uaw_avl;
void *uaw_next_result;
int8_t uaw_dir;
uint8_t uaw_robust;
};
struct uu_avl {
uintptr_t ua_next_enc;
uintptr_t ua_prev_enc;
uu_avl_pool_t *ua_pool;
uintptr_t ua_parent_enc;
uint8_t ua_debug;
uint8_t ua_index; /* mark for uu_avl_index_ts */
struct avl_tree ua_tree;
uu_avl_walk_t ua_null_walk;
};
#define UU_AVL_PTR(x) ((uu_avl_t *)UU_PTR_DECODE(x))
#define UU_AVL_POOL_MAXNAME 64
struct uu_avl_pool {
uu_avl_pool_t *uap_next;
uu_avl_pool_t *uap_prev;
char uap_name[UU_AVL_POOL_MAXNAME];
size_t uap_nodeoffset;
size_t uap_objsize;
uu_compare_fn_t *uap_cmp;
uint8_t uap_debug;
uint8_t uap_last_index;
pthread_mutex_t uap_lock; /* protects null_avl */
uu_avl_t uap_null_avl;
};
/*
* atfork() handlers
*/
void uu_avl_lockup(void);
void uu_avl_release(void);
void uu_list_lockup(void);
void uu_list_release(void);
#ifdef __cplusplus
}
#endif
#endif /* _LIBUUTIL_IMPL_H */
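A small standalone illustration of the UU_PTR_ENCODE()/UU_PTR_DECODE() round trip described in this header, assuming an _LP64 build and substituting the compiler builtin for BSWAP_64() from <sys/byteorder.h> purely to keep the example self-contained:
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Stand-in for the _LP64 definitions, with __builtin_bswap64 for BSWAP_64. */
#define UU_PTR_ENCODE(ptr)	__builtin_bswap64((uintptr_t)(void *)(ptr))
#define UU_PTR_DECODE(ptr)	((void *)UU_PTR_ENCODE(ptr))

int
main(void)
{
	int x;
	uintptr_t enc = UU_PTR_ENCODE(&x);

	/* The byte-swapped value no longer looks like a live pointer... */
	printf("raw %p encoded 0x%jx\n", (void *)&x, (uintmax_t)enc);

	/* ...but swapping again recovers the original pointer exactly. */
	assert(UU_PTR_DECODE(enc) == (void *)&x);
	return (0);
}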
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompile.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompile.h
index 7109d42ffbb6..23e637983475 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompile.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompile.h
@@ -1,284 +1,193 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CCOMPILE_H
#define _SYS_CCOMPILE_H
/*
* This file contains definitions designed to enable different compilers
* to be used harmoniously on Solaris systems.
*/
#ifdef __cplusplus
extern "C" {
#endif
-/*
- * Allow for version tests for compiler bugs and features.
- */
-#if defined(__GNUC__)
-#define __GNUC_VERSION \
- (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#else
-#define __GNUC_VERSION 0
-#endif
-
-#if defined(__ATTRIBUTE_IMPLEMENTED) || defined(__GNUC__)
-
-#if 0
-/*
- * analogous to lint's PRINTFLIKEn
- */
-#define __sun_attr___PRINTFLIKE__(__n) \
- __attribute__((__format__(printf, __n, (__n)+1)))
-#define __sun_attr___VPRINTFLIKE__(__n) \
- __attribute__((__format__(printf, __n, 0)))
-
-#define __sun_attr___KPRINTFLIKE__ __sun_attr___PRINTFLIKE__
-#define __sun_attr___KVPRINTFLIKE__ __sun_attr___VPRINTFLIKE__
-#else
-/*
- * Currently the openzfs codebase has a lot of formatting errors
- * which are not picked up in the linux build because they're not
- * doing formatting checks. LLVM's kprintf implementation doesn't
- * actually do format checks!
- *
- * For FreeBSD these break under gcc! LLVM shim'ed cmn_err as a
- * format attribute but also didn't check anything. If one
- * replaces it with the above, all of the format issues
- * in the codebase show up.
- *
- * Once those format string issues are addressed, the above
- * should be flipped on once again.
- */
-#define __sun_attr___PRINTFLIKE__(__n)
-#define __sun_attr___VPRINTFLIKE__(__n)
-#define __sun_attr___KPRINTFLIKE__(__n)
-#define __sun_attr___KVPRINTFLIKE__(__n)
-
-#endif
-
-/*
- * This one's pretty obvious -- the function never returns
- */
-#define __sun_attr___noreturn__ __attribute__((__noreturn__))
-
-/*
- * This is an appropriate label for functions that do not
- * modify their arguments, e.g. strlen()
- */
-#define __sun_attr___pure__ __attribute__((__pure__))
-
-/*
- * This is a stronger form of __pure__. Can be used for functions
- * that do not modify their arguments and don't depend on global
- * memory.
- */
-#define __sun_attr___const__ __attribute__((__const__))
-
-/*
- * structure packing like #pragma pack(1)
- */
-#define __sun_attr___packed__ __attribute__((__packed__))
-
-#define ___sun_attr_inner(__a) __sun_attr_##__a
-#define __sun_attr__(__a) ___sun_attr_inner __a
-
-#else /* __ATTRIBUTE_IMPLEMENTED || __GNUC__ */
-
-#define __sun_attr__(__a)
-
-#endif /* __ATTRIBUTE_IMPLEMENTED || __GNUC__ */
-
-/*
- * Shorthand versions for readability
- */
-
-#define __PRINTFLIKE(__n) __sun_attr__((__PRINTFLIKE__(__n)))
-#define __VPRINTFLIKE(__n) __sun_attr__((__VPRINTFLIKE__(__n)))
-#define __KPRINTFLIKE(__n) __sun_attr__((__KPRINTFLIKE__(__n)))
-#define __KVPRINTFLIKE(__n) __sun_attr__((__KVPRINTFLIKE__(__n)))
-#if defined(_KERNEL) || defined(_STANDALONE)
-#define __NORETURN __sun_attr__((__noreturn__))
-#endif /* _KERNEL || _STANDALONE */
-#define __CONST __sun_attr__((__const__))
-#define __PURE __sun_attr__((__pure__))
-
#if defined(INVARIANTS) && !defined(ZFS_DEBUG)
#define ZFS_DEBUG
#undef NDEBUG
#endif
#define EXPORT_SYMBOL(x)
#define MODULE_AUTHOR(s)
#define MODULE_DESCRIPTION(s)
#define MODULE_LICENSE(s)
#define module_param(a, b, c)
#define module_param_call(a, b, c, d, e)
#define module_param_named(a, b, c, d)
#define MODULE_PARM_DESC(a, b)
#define asm __asm
#ifdef ZFS_DEBUG
#undef NDEBUG
#endif
#if !defined(ZFS_DEBUG) && !defined(NDEBUG)
#define NDEBUG
#endif
#ifndef EINTEGRITY
#define EINTEGRITY 97 /* EINTEGRITY is new in 13 */
#endif
/*
* These are bespoke errnos used in ZFS. We map them to their closest FreeBSD
* equivalents. This gives us more useful error messages from strerror(3).
*/
#define ECKSUM EINTEGRITY
#define EFRAGS ENOSPC
/* Similarly for ENOTACTIVE */
#define ENOTACTIVE ECANCELED
#define EREMOTEIO EREMOTE
#define ECHRNG ENXIO
#define ETIME ETIMEDOUT
#ifndef LOCORE
#ifndef HAVE_RPC_TYPES
typedef int bool_t;
typedef int enum_t;
#endif
#endif
#ifndef __cplusplus
#define __init
#define __exit
#endif
#if defined(_KERNEL) || defined(_STANDALONE)
#define param_set_charp(a, b) (0)
#define ATTR_UID AT_UID
#define ATTR_GID AT_GID
#define ATTR_MODE AT_MODE
#define ATTR_XVATTR AT_XVATTR
#define ATTR_CTIME AT_CTIME
#define ATTR_MTIME AT_MTIME
#define ATTR_ATIME AT_ATIME
#if defined(_STANDALONE)
#define vmem_free kmem_free
#define vmem_zalloc kmem_zalloc
#define vmem_alloc kmem_zalloc
#else
#define vmem_free zfs_kmem_free
#define vmem_zalloc(size, flags) zfs_kmem_alloc(size, flags | M_ZERO)
#define vmem_alloc zfs_kmem_alloc
#endif
#define MUTEX_NOLOCKDEP 0
#define RW_NOLOCKDEP 0
#else
#define FALSE 0
#define TRUE 1
/*
* XXX We really need to consolidate on standard
* error codes in the common code
*/
#define ENOSTR ENOTCONN
#define ENODATA EINVAL
#define __BSD_VISIBLE 1
#ifndef IN_BASE
#define __POSIX_VISIBLE 201808
#define __XSI_VISIBLE 1000
#endif
#define ARRAY_SIZE(a) (sizeof (a) / sizeof (a[0]))
#define mmap64 mmap
/* Note: this file can be used on linux/macOS when bootstrapping tools. */
#if defined(__FreeBSD__)
#define open64 open
#define pwrite64 pwrite
#define ftruncate64 ftruncate
#define lseek64 lseek
#define pread64 pread
#define stat64 stat
#define lstat64 lstat
#define statfs64 statfs
#define readdir64 readdir
#define dirent64 dirent
#endif
#define P2ALIGN(x, align) ((x) & -(align))
#define P2CROSS(x, y, align) (((x) ^ (y)) > (align) - 1)
#define P2ROUNDUP(x, align) ((((x) - 1) | ((align) - 1)) + 1)
#define P2PHASE(x, align) ((x) & ((align) - 1))
#define P2NPHASE(x, align) (-(x) & ((align) - 1))
#define ISP2(x) (((x) & ((x) - 1)) == 0)
#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#define P2BOUNDARY(off, len, align) \
(((off) ^ ((off) + (len) - 1)) > (align) - 1)
/*
* Typed version of the P2* macros. These macros should be used to ensure
* that the result is correctly calculated based on the data type of (x),
* which is passed in as the last argument, regardless of the data
* type of the alignment. For example, if (x) is of type uint64_t,
* and we want to round it up to a page boundary using "PAGESIZE" as
* the alignment, we can do either
*
* P2ROUNDUP(x, (uint64_t)PAGESIZE)
* or
* P2ROUNDUP_TYPED(x, PAGESIZE, uint64_t)
*/
#define P2ALIGN_TYPED(x, align, type) \
((type)(x) & -(type)(align))
#define P2PHASE_TYPED(x, align, type) \
((type)(x) & ((type)(align) - 1))
#define P2NPHASE_TYPED(x, align, type) \
(-(type)(x) & ((type)(align) - 1))
#define P2ROUNDUP_TYPED(x, align, type) \
((((type)(x) - 1) | ((type)(align) - 1)) + 1)
#define P2END_TYPED(x, align, type) \
(-(~(type)(x) & -(type)(align)))
#define P2PHASEUP_TYPED(x, align, phase, type) \
((type)(phase) - (((type)(phase) - (type)(x)) & -(type)(align)))
#define P2CROSS_TYPED(x, y, align, type) \
(((type)(x) ^ (type)(y)) > (type)(align) - 1)
#define P2SAMEHIGHBIT_TYPED(x, y, type) \
(((type)(x) ^ (type)(y)) < ((type)(x) & (type)(y)))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define RLIM64_INFINITY RLIM_INFINITY
#ifndef HAVE_ERESTART
#define ERESTART EAGAIN
#endif
#define ABS(a) ((a) < 0 ? -(a) : (a))
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CCOMPILE_H */
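The P2* alignment macros above are plain bit arithmetic on power-of-two alignments; a short standalone sketch with the relevant definitions copied from this header and a worked example (the values are arbitrary):
#include <stdint.h>
#include <assert.h>

#define P2ALIGN(x, align)		((x) & -(align))
#define P2PHASE(x, align)		((x) & ((align) - 1))
#define P2ROUNDUP(x, align)		((((x) - 1) | ((align) - 1)) + 1)
#define P2ROUNDUP_TYPED(x, align, type)	\
	((((type)(x) - 1) | ((type)(align) - 1)) + 1)

int
main(void)
{
	/* 5000 relative to a 4096-byte boundary. */
	assert(P2ALIGN(5000U, 4096U) == 4096U);		/* round down */
	assert(P2PHASE(5000U, 4096U) == 904U);		/* offset into block */
	assert(P2ROUNDUP(5000U, 4096U) == 8192U);	/* round up */

	/*
	 * The _TYPED form forces the arithmetic into the requested type
	 * (e.g. uint64_t), independent of the alignment argument's type.
	 */
	assert(P2ROUNDUP_TYPED(5000, 4096, uint64_t) == 8192);
	return (0);
}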
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/cmn_err.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/cmn_err.h
index ba4cff37d5f3..ddc2f0049e59 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/cmn_err.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/cmn_err.h
@@ -1,89 +1,82 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CMN_ERR_H
#define _SYS_CMN_ERR_H
#if !defined(_ASM)
#include <sys/_stdarg.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Common error handling severity levels */
#define CE_CONT 0 /* continuation */
#define CE_NOTE 1 /* notice */
#define CE_WARN 2 /* warning */
#define CE_PANIC 3 /* panic */
#define CE_IGNORE 4 /* print nothing */
#ifndef _ASM
-/*PRINTFLIKE2*/
extern void cmn_err(int, const char *, ...)
- __KPRINTFLIKE(2);
+ __attribute__((format(printf, 2, 3)));
extern void vzcmn_err(zoneid_t, int, const char *, __va_list)
- __KVPRINTFLIKE(3);
+ __attribute__((format(printf, 3, 0)));
extern void vcmn_err(int, const char *, __va_list)
- __KVPRINTFLIKE(2);
+ __attribute__((format(printf, 2, 0)));
-/*PRINTFLIKE3*/
extern void zcmn_err(zoneid_t, int, const char *, ...)
- __KPRINTFLIKE(3);
+ __attribute__((format(printf, 3, 4)));
extern void vzprintf(zoneid_t, const char *, __va_list)
- __KVPRINTFLIKE(2);
+ __attribute__((format(printf, 2, 0)));
-/*PRINTFLIKE2*/
extern void zprintf(zoneid_t, const char *, ...)
- __KPRINTFLIKE(2);
+ __attribute__((format(printf, 2, 3)));
extern void vuprintf(const char *, __va_list)
- __KVPRINTFLIKE(1);
+ __attribute__((format(printf, 1, 0)));
-/*PRINTFLIKE1*/
extern void panic(const char *, ...)
- __KPRINTFLIKE(1) __NORETURN;
-
-extern void vpanic(const char *, __va_list)
- __KVPRINTFLIKE(1) __NORETURN;
+ __attribute__((format(printf, 1, 2)));
#endif /* !_ASM */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CMN_ERR_H */
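Both this header and libuutil.h above replace the lint-style /*PRINTFLIKEn*/ and __KPRINTFLIKE annotations with GCC/Clang format attributes, which lets the compiler check format strings against their arguments. A minimal userland sketch of the same attribute on a hypothetical logging helper (not an API from this diff):
#include <stdarg.h>
#include <stdio.h>

/* Argument 1 is the format string; argument checking starts at argument 2. */
static void log_msg(const char *fmt, ...)
    __attribute__((format(printf, 1, 2)));

static void
log_msg(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void) vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int
main(void)
{
	log_msg("%s: %d\n", "ok", 42);	/* checked, compiles cleanly */
#if 0
	log_msg("%s\n", 42);		/* -Wformat: int passed for %s */
#endif
	return (0);
}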
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/cmn_err.h b/sys/contrib/openzfs/include/os/linux/spl/sys/cmn_err.h
index 314bbbaf9e95..79297067c17d 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/cmn_err.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/cmn_err.h
@@ -1,41 +1,44 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_CMN_ERR_H
#define _SPL_CMN_ERR_H
#include <stdarg.h>
#define CE_CONT 0 /* continuation */
#define CE_NOTE 1 /* notice */
#define CE_WARN 2 /* warning */
#define CE_PANIC 3 /* panic */
#define CE_IGNORE 4 /* print nothing */
-extern void cmn_err(int, const char *, ...);
-extern void vcmn_err(int, const char *, va_list);
-extern void vpanic(const char *, va_list);
+extern void cmn_err(int, const char *, ...)
+ __attribute__((format(printf, 2, 3)));
+extern void vcmn_err(int, const char *, va_list)
+ __attribute__((format(printf, 2, 0)));
+extern void vpanic(const char *, va_list)
+ __attribute__((format(printf, 1, 0)));
#define fm_panic panic
#endif /* SPL_CMN_ERR_H */
diff --git a/sys/contrib/openzfs/include/sys/Makefile.am b/sys/contrib/openzfs/include/sys/Makefile.am
index 385c82c926ae..54573fbe1b1c 100644
--- a/sys/contrib/openzfs/include/sys/Makefile.am
+++ b/sys/contrib/openzfs/include/sys/Makefile.am
@@ -1,152 +1,151 @@
SUBDIRS = fm fs crypto lua sysevent zstd
COMMON_H = \
abd.h \
abd_impl.h \
aggsum.h \
arc.h \
arc_impl.h \
avl.h \
avl_impl.h \
bitops.h \
blkptr.h \
bplist.h \
bpobj.h \
bptree.h \
btree.h \
bqueue.h \
dataset_kstats.h \
dbuf.h \
ddt.h \
dmu.h \
dmu_impl.h \
dmu_objset.h \
dmu_recv.h \
dmu_redact.h \
dmu_send.h \
dmu_traverse.h \
dmu_tx.h \
dmu_zfetch.h \
dnode.h \
dsl_bookmark.h \
dsl_dataset.h \
dsl_deadlist.h \
dsl_deleg.h \
dsl_destroy.h \
dsl_dir.h \
dsl_crypt.h \
dsl_pool.h \
dsl_prop.h \
dsl_scan.h \
dsl_synctask.h \
dsl_userhold.h \
edonr.h \
efi_partition.h \
frame.h \
hkdf.h \
metaslab.h \
metaslab_impl.h \
mmp.h \
mntent.h \
mod.h \
multilist.h \
- note.h \
nvpair.h \
nvpair_impl.h \
objlist.h \
pathname.h \
qat.h \
range_tree.h \
rrwlock.h \
sa.h \
sa_impl.h \
skein.h \
spa_boot.h \
spa_checkpoint.h \
spa_log_spacemap.h \
space_map.h \
space_reftree.h \
spa.h \
spa_impl.h \
spa_checksum.h \
sysevent.h \
txg.h \
txg_impl.h \
u8_textprep_data.h \
u8_textprep.h \
uberblock.h \
uberblock_impl.h \
uio_impl.h \
unique.h \
uuid.h \
vdev_disk.h \
vdev_file.h \
vdev.h \
vdev_draid.h \
vdev_impl.h \
vdev_indirect_births.h \
vdev_indirect_mapping.h \
vdev_initialize.h \
vdev_raidz.h \
vdev_raidz_impl.h \
vdev_rebuild.h \
vdev_removal.h \
vdev_trim.h \
xvattr.h \
zap.h \
zap_impl.h \
zap_leaf.h \
zcp.h \
zcp_global.h \
zcp_iter.h \
zcp_prop.h \
zcp_set.h \
zfeature.h \
zfs_acl.h \
zfs_bootenv.h \
zfs_context.h \
zfs_debug.h \
zfs_delay.h \
zfs_file.h \
zfs_fuid.h \
zfs_project.h \
zfs_quota.h \
zfs_racct.h \
zfs_ratelimit.h \
zfs_refcount.h \
zfs_rlock.h \
zfs_sa.h \
zfs_stat.h \
zfs_sysfs.h \
zfs_vfsops.h \
zfs_vnops.h \
zfs_znode.h \
zil.h \
zil_impl.h \
zio_checksum.h \
zio_compress.h \
zio_crypt.h \
zio.h \
zio_impl.h \
zio_priority.h \
zrlock.h \
zthr.h
KERNEL_H = \
zfs_ioctl.h \
zfs_ioctl_impl.h \
zfs_onexit.h \
zvol.h \
zvol_impl.h
if CONFIG_USER
libzfsdir = $(includedir)/libzfs/sys
libzfs_HEADERS = $(COMMON_H)
endif
if CONFIG_KERNEL
if BUILD_LINUX
kerneldir = @prefix@/src/zfs-$(VERSION)/include/sys
kernel_HEADERS = $(COMMON_H) $(KERNEL_H)
endif
endif
diff --git a/sys/contrib/openzfs/include/sys/abd.h b/sys/contrib/openzfs/include/sys/abd.h
index 6903e0c0e713..5c6bd0c271d4 100644
--- a/sys/contrib/openzfs/include/sys/abd.h
+++ b/sys/contrib/openzfs/include/sys/abd.h
@@ -1,220 +1,221 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2016, 2019 by Delphix. All rights reserved.
*/
#ifndef _ABD_H
#define _ABD_H
#include <sys/isa_defs.h>
#include <sys/debug.h>
#include <sys/zfs_refcount.h>
#include <sys/uio.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef enum abd_flags {
ABD_FLAG_LINEAR = 1 << 0, /* is buffer linear (or scattered)? */
ABD_FLAG_OWNER = 1 << 1, /* does it own its data buffers? */
ABD_FLAG_META = 1 << 2, /* does this represent FS metadata? */
ABD_FLAG_MULTI_ZONE = 1 << 3, /* pages split over memory zones */
ABD_FLAG_MULTI_CHUNK = 1 << 4, /* pages split over multiple chunks */
ABD_FLAG_LINEAR_PAGE = 1 << 5, /* linear but allocd from page */
ABD_FLAG_GANG = 1 << 6, /* mult ABDs chained together */
ABD_FLAG_GANG_FREE = 1 << 7, /* gang ABD is responsible for mem */
ABD_FLAG_ZEROS = 1 << 8, /* ABD for zero-filled buffer */
ABD_FLAG_ALLOCD = 1 << 9, /* we allocated the abd_t */
} abd_flags_t;
typedef struct abd {
abd_flags_t abd_flags;
uint_t abd_size; /* excludes scattered abd_offset */
list_node_t abd_gang_link;
#ifdef ZFS_DEBUG
struct abd *abd_parent;
zfs_refcount_t abd_children;
#endif
kmutex_t abd_mtx;
union {
struct abd_scatter {
uint_t abd_offset;
#if defined(__FreeBSD__) && defined(_KERNEL)
void *abd_chunks[1]; /* actually variable-length */
#else
uint_t abd_nents;
struct scatterlist *abd_sgl;
#endif
} abd_scatter;
struct abd_linear {
void *abd_buf;
struct scatterlist *abd_sgl; /* for LINEAR_PAGE */
} abd_linear;
struct abd_gang {
list_t abd_gang_chain;
} abd_gang;
} abd_u;
} abd_t;
typedef int abd_iter_func_t(void *buf, size_t len, void *priv);
typedef int abd_iter_func2_t(void *bufa, void *bufb, size_t len, void *priv);
extern int zfs_abd_scatter_enabled;
/*
* Allocations and deallocations
*/
abd_t *abd_alloc(size_t, boolean_t);
abd_t *abd_alloc_linear(size_t, boolean_t);
abd_t *abd_alloc_gang(void);
abd_t *abd_alloc_for_io(size_t, boolean_t);
abd_t *abd_alloc_sametype(abd_t *, size_t);
+boolean_t abd_size_alloc_linear(size_t);
void abd_gang_add(abd_t *, abd_t *, boolean_t);
void abd_free(abd_t *);
abd_t *abd_get_offset(abd_t *, size_t);
abd_t *abd_get_offset_size(abd_t *, size_t, size_t);
abd_t *abd_get_offset_struct(abd_t *, abd_t *, size_t, size_t);
abd_t *abd_get_zeros(size_t);
abd_t *abd_get_from_buf(void *, size_t);
void abd_cache_reap_now(void);
/*
* Conversion to and from a normal buffer
*/
void *abd_to_buf(abd_t *);
void *abd_borrow_buf(abd_t *, size_t);
void *abd_borrow_buf_copy(abd_t *, size_t);
void abd_return_buf(abd_t *, void *, size_t);
void abd_return_buf_copy(abd_t *, void *, size_t);
void abd_take_ownership_of_buf(abd_t *, boolean_t);
void abd_release_ownership_of_buf(abd_t *);
/*
* ABD operations
*/
int abd_iterate_func(abd_t *, size_t, size_t, abd_iter_func_t *, void *);
int abd_iterate_func2(abd_t *, abd_t *, size_t, size_t, size_t,
abd_iter_func2_t *, void *);
void abd_copy_off(abd_t *, abd_t *, size_t, size_t, size_t);
void abd_copy_from_buf_off(abd_t *, const void *, size_t, size_t);
void abd_copy_to_buf_off(void *, abd_t *, size_t, size_t);
int abd_cmp(abd_t *, abd_t *);
int abd_cmp_buf_off(abd_t *, const void *, size_t, size_t);
void abd_zero_off(abd_t *, size_t, size_t);
void abd_verify(abd_t *);
void abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
ssize_t csize, ssize_t dsize, const unsigned parity,
void (*func_raidz_gen)(void **, const void *, size_t, size_t));
void abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
ssize_t tsize, const unsigned parity,
void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
const unsigned *mul),
const unsigned *mul);
/*
* Wrappers for calls with offsets of 0
*/
static inline void
abd_copy(abd_t *dabd, abd_t *sabd, size_t size)
{
abd_copy_off(dabd, sabd, 0, 0, size);
}
static inline void
abd_copy_from_buf(abd_t *abd, const void *buf, size_t size)
{
abd_copy_from_buf_off(abd, buf, 0, size);
}
static inline void
abd_copy_to_buf(void* buf, abd_t *abd, size_t size)
{
abd_copy_to_buf_off(buf, abd, 0, size);
}
static inline int
abd_cmp_buf(abd_t *abd, const void *buf, size_t size)
{
return (abd_cmp_buf_off(abd, buf, 0, size));
}
static inline void
abd_zero(abd_t *abd, size_t size)
{
abd_zero_off(abd, 0, size);
}
/*
* ABD type check functions
*/
static inline boolean_t
abd_is_linear(abd_t *abd)
{
return ((abd->abd_flags & ABD_FLAG_LINEAR) ? B_TRUE : B_FALSE);
}
static inline boolean_t
abd_is_linear_page(abd_t *abd)
{
return ((abd->abd_flags & ABD_FLAG_LINEAR_PAGE) ? B_TRUE : B_FALSE);
}
static inline boolean_t
abd_is_gang(abd_t *abd)
{
return ((abd->abd_flags & ABD_FLAG_GANG) ? B_TRUE : B_FALSE);
}
static inline uint_t
abd_get_size(abd_t *abd)
{
return (abd->abd_size);
}
/*
* Module lifecycle
* Defined in each specific OS's abd_os.c
*/
void abd_init(void);
void abd_fini(void);
/*
* Linux ABD bio functions
*/
#if defined(__linux__) && defined(_KERNEL)
unsigned int abd_bio_map_off(struct bio *, abd_t *, unsigned int, size_t);
unsigned long abd_nr_pages_off(abd_t *, unsigned int, size_t);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ABD_H */
diff --git a/sys/contrib/openzfs/include/sys/abd_impl.h b/sys/contrib/openzfs/include/sys/abd_impl.h
index 113700cd72b1..e96f1edfc8ce 100644
--- a/sys/contrib/openzfs/include/sys/abd_impl.h
+++ b/sys/contrib/openzfs/include/sys/abd_impl.h
@@ -1,112 +1,111 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2016, 2019 by Delphix. All rights reserved.
*/
#ifndef _ABD_IMPL_H
#define _ABD_IMPL_H
#include <sys/abd.h>
#include <sys/wmsum.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef enum abd_stats_op {
ABDSTAT_INCR, /* Increase abdstat values */
ABDSTAT_DECR /* Decrease abdstat values */
} abd_stats_op_t;
struct scatterlist; /* forward declaration */
struct abd_iter {
/* public interface */
void *iter_mapaddr; /* addr corresponding to iter_pos */
size_t iter_mapsize; /* length of data valid at mapaddr */
/* private */
abd_t *iter_abd; /* ABD being iterated through */
size_t iter_pos;
size_t iter_offset; /* offset in current sg/abd_buf, */
/* abd_offset included */
struct scatterlist *iter_sg; /* current sg */
};
extern abd_t *abd_zero_scatter;
abd_t *abd_gang_get_offset(abd_t *, size_t *);
abd_t *abd_alloc_struct(size_t);
void abd_free_struct(abd_t *);
/*
* OS specific functions
*/
abd_t *abd_alloc_struct_impl(size_t);
abd_t *abd_get_offset_scatter(abd_t *, abd_t *, size_t, size_t);
void abd_free_struct_impl(abd_t *);
void abd_alloc_chunks(abd_t *, size_t);
void abd_free_chunks(abd_t *);
-boolean_t abd_size_alloc_linear(size_t);
void abd_update_scatter_stats(abd_t *, abd_stats_op_t);
void abd_update_linear_stats(abd_t *, abd_stats_op_t);
void abd_verify_scatter(abd_t *);
void abd_free_linear_page(abd_t *);
/* OS specific abd_iter functions */
void abd_iter_init(struct abd_iter *, abd_t *);
boolean_t abd_iter_at_end(struct abd_iter *);
void abd_iter_advance(struct abd_iter *, size_t);
void abd_iter_map(struct abd_iter *);
void abd_iter_unmap(struct abd_iter *);
/*
* Helper macros
*/
#define ABDSTAT_INCR(stat, val) \
wmsum_add(&abd_sums.stat, (val))
#define ABDSTAT_BUMP(stat) ABDSTAT_INCR(stat, 1)
#define ABDSTAT_BUMPDOWN(stat) ABDSTAT_INCR(stat, -1)
#define ABD_SCATTER(abd) (abd->abd_u.abd_scatter)
#define ABD_LINEAR_BUF(abd) (abd->abd_u.abd_linear.abd_buf)
#define ABD_GANG(abd) (abd->abd_u.abd_gang)
#if defined(_KERNEL)
#if defined(__FreeBSD__)
#define abd_enter_critical(flags) critical_enter()
#define abd_exit_critical(flags) critical_exit()
#else
#define abd_enter_critical(flags) local_irq_save(flags)
#define abd_exit_critical(flags) local_irq_restore(flags)
#endif
#else /* !_KERNEL */
#define abd_enter_critical(flags) ((void)0)
#define abd_exit_critical(flags) ((void)0)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ABD_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/arc.h b/sys/contrib/openzfs/include/sys/arc.h
index ef07a657f53c..20fa47bd9564 100644
--- a/sys/contrib/openzfs/include/sys/arc.h
+++ b/sys/contrib/openzfs/include/sys/arc.h
@@ -1,341 +1,341 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019, Klara Inc.
*/
#ifndef _SYS_ARC_H
#define _SYS_ARC_H
#include <sys/zfs_context.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zio.h>
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/zfs_refcount.h>
/*
* Used by arc_flush() to inform arc_evict_state() that it should evict
* all available buffers from the arc state being passed in.
*/
#define ARC_EVICT_ALL UINT64_MAX
#define HDR_SET_LSIZE(hdr, x) do { \
ASSERT(IS_P2ALIGNED(x, 1U << SPA_MINBLOCKSHIFT)); \
(hdr)->b_lsize = ((x) >> SPA_MINBLOCKSHIFT); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define HDR_SET_PSIZE(hdr, x) do { \
ASSERT(IS_P2ALIGNED((x), 1U << SPA_MINBLOCKSHIFT)); \
(hdr)->b_psize = ((x) >> SPA_MINBLOCKSHIFT); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define HDR_GET_LSIZE(hdr) ((hdr)->b_lsize << SPA_MINBLOCKSHIFT)
#define HDR_GET_PSIZE(hdr) ((hdr)->b_psize << SPA_MINBLOCKSHIFT)
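/*
 * Worked example, assuming SPA_MINBLOCKSHIFT == 9 (512-byte units):
 * HDR_SET_LSIZE(hdr, 131072) stores 131072 >> 9 = 256 in b_lsize, and
 * HDR_GET_LSIZE(hdr) then returns 256 << 9 = 131072.
 */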
typedef struct arc_buf_hdr arc_buf_hdr_t;
typedef struct arc_buf arc_buf_t;
typedef struct arc_prune arc_prune_t;
/*
* Because the ARC can store encrypted data, errors (not due to bugs) may arise
* while transforming data into its desired format - specifically, when
* decrypting, the key may not be present, or the HMAC may not be correct
* which signifies deliberate tampering with the on-disk state
* (assuming that the checksum was correct). If any error occurs, the "buf"
* parameter will be NULL.
*/
typedef void arc_read_done_func_t(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *bp, arc_buf_t *buf, void *priv);
typedef void arc_write_done_func_t(zio_t *zio, arc_buf_t *buf, void *priv);
typedef void arc_prune_func_t(int64_t bytes, void *priv);
/* Shared module parameters */
extern int zfs_arc_average_blocksize;
/* generic arc_done_func_t's which you can use */
arc_read_done_func_t arc_bcopy_func;
arc_read_done_func_t arc_getbuf_func;
/* generic arc_prune_func_t wrapper for callbacks */
struct arc_prune {
arc_prune_func_t *p_pfunc;
void *p_private;
uint64_t p_adjust;
list_node_t p_node;
zfs_refcount_t p_refcnt;
};
typedef enum arc_strategy {
ARC_STRATEGY_META_ONLY = 0, /* Evict only meta data buffers */
ARC_STRATEGY_META_BALANCED = 1, /* Evict data buffers if needed */
} arc_strategy_t;
typedef enum arc_flags
{
/*
* Public flags that can be passed into the ARC by external consumers.
*/
ARC_FLAG_WAIT = 1 << 0, /* perform sync I/O */
ARC_FLAG_NOWAIT = 1 << 1, /* perform async I/O */
ARC_FLAG_PREFETCH = 1 << 2, /* I/O is a prefetch */
ARC_FLAG_CACHED = 1 << 3, /* I/O was in cache */
ARC_FLAG_L2CACHE = 1 << 4, /* cache in L2ARC */
ARC_FLAG_PREDICTIVE_PREFETCH = 1 << 5, /* I/O from zfetch */
ARC_FLAG_PRESCIENT_PREFETCH = 1 << 6, /* long min lifespan */
/*
* Private ARC flags. These flags are private ARC only flags that
* will show up in b_flags in the arc_buf_hdr_t. These flags should
* only be set by ARC code.
*/
ARC_FLAG_IN_HASH_TABLE = 1 << 7, /* buffer is hashed */
ARC_FLAG_IO_IN_PROGRESS = 1 << 8, /* I/O in progress */
ARC_FLAG_IO_ERROR = 1 << 9, /* I/O failed for buf */
ARC_FLAG_INDIRECT = 1 << 10, /* indirect block */
/* Indicates that block was read with ASYNC priority. */
ARC_FLAG_PRIO_ASYNC_READ = 1 << 11,
ARC_FLAG_L2_WRITING = 1 << 12, /* write in progress */
ARC_FLAG_L2_EVICTED = 1 << 13, /* evicted during I/O */
ARC_FLAG_L2_WRITE_HEAD = 1 << 14, /* head of write list */
/*
* Encrypted or authenticated on disk (may be plaintext in memory).
* This header has b_crypt_hdr allocated. Does not include indirect
* blocks with checksums of MACs which will also have their X
* (encrypted) bit set in the bp.
*/
ARC_FLAG_PROTECTED = 1 << 15,
/* data has not been authenticated yet */
ARC_FLAG_NOAUTH = 1 << 16,
/* indicates that the buffer contains metadata (otherwise, data) */
ARC_FLAG_BUFC_METADATA = 1 << 17,
/* Flags specifying whether optional hdr struct fields are defined */
ARC_FLAG_HAS_L1HDR = 1 << 18,
ARC_FLAG_HAS_L2HDR = 1 << 19,
/*
* Indicates the arc_buf_hdr_t's b_pabd matches the on-disk data.
* This allows the l2arc to use the blkptr's checksum to verify
* the data without having to store the checksum in the hdr.
*/
ARC_FLAG_COMPRESSED_ARC = 1 << 20,
ARC_FLAG_SHARED_DATA = 1 << 21,
/*
* Fail this arc_read() (with ENOENT) if the data is not already present
* in cache.
*/
ARC_FLAG_CACHED_ONLY = 1 << 22,
/*
* Don't instantiate an arc_buf_t for arc_read_done.
*/
ARC_FLAG_NO_BUF = 1 << 23,
/*
* The arc buffer's compression mode is stored in the top 7 bits of the
* flags field, so these dummy flags are included so that MDB can
* interpret the enum properly.
*/
ARC_FLAG_COMPRESS_0 = 1 << 24,
ARC_FLAG_COMPRESS_1 = 1 << 25,
ARC_FLAG_COMPRESS_2 = 1 << 26,
ARC_FLAG_COMPRESS_3 = 1 << 27,
ARC_FLAG_COMPRESS_4 = 1 << 28,
ARC_FLAG_COMPRESS_5 = 1 << 29,
ARC_FLAG_COMPRESS_6 = 1 << 30
} arc_flags_t;
typedef enum arc_buf_flags {
ARC_BUF_FLAG_SHARED = 1 << 0,
ARC_BUF_FLAG_COMPRESSED = 1 << 1,
/*
* indicates whether this arc_buf_t is encrypted, regardless of
* state on-disk
*/
ARC_BUF_FLAG_ENCRYPTED = 1 << 2
} arc_buf_flags_t;
struct arc_buf {
arc_buf_hdr_t *b_hdr;
arc_buf_t *b_next;
kmutex_t b_evict_lock;
void *b_data;
arc_buf_flags_t b_flags;
};
typedef enum arc_buf_contents {
ARC_BUFC_INVALID, /* invalid type */
ARC_BUFC_DATA, /* buffer contains data */
ARC_BUFC_METADATA, /* buffer contains metadata */
ARC_BUFC_NUMTYPES
} arc_buf_contents_t;
/*
* The following breakdowns of arc_size exist for kstat only.
*/
typedef enum arc_space_type {
ARC_SPACE_DATA,
ARC_SPACE_META,
ARC_SPACE_HDRS,
ARC_SPACE_L2HDRS,
ARC_SPACE_DBUF,
ARC_SPACE_DNODE,
ARC_SPACE_BONUS,
ARC_SPACE_ABD_CHUNK_WASTE,
ARC_SPACE_NUMTYPES
} arc_space_type_t;
typedef enum arc_state_type {
ARC_STATE_ANON,
ARC_STATE_MRU,
ARC_STATE_MRU_GHOST,
ARC_STATE_MFU,
ARC_STATE_MFU_GHOST,
ARC_STATE_L2C_ONLY,
ARC_STATE_NUMTYPES
} arc_state_type_t;
typedef struct arc_buf_info {
arc_state_type_t abi_state_type;
arc_buf_contents_t abi_state_contents;
uint32_t abi_flags;
uint32_t abi_bufcnt;
uint64_t abi_size;
uint64_t abi_spa;
uint64_t abi_access;
uint32_t abi_mru_hits;
uint32_t abi_mru_ghost_hits;
uint32_t abi_mfu_hits;
uint32_t abi_mfu_ghost_hits;
uint32_t abi_l2arc_hits;
uint32_t abi_holds;
uint64_t abi_l2arc_dattr;
uint64_t abi_l2arc_asize;
enum zio_compress abi_l2arc_compress;
} arc_buf_info_t;
void arc_space_consume(uint64_t space, arc_space_type_t type);
void arc_space_return(uint64_t space, arc_space_type_t type);
boolean_t arc_is_metadata(arc_buf_t *buf);
boolean_t arc_is_encrypted(arc_buf_t *buf);
boolean_t arc_is_unauthenticated(arc_buf_t *buf);
enum zio_compress arc_get_compression(arc_buf_t *buf);
void arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
uint8_t *iv, uint8_t *mac);
int arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
boolean_t in_place);
void arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac);
arc_buf_t *arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type,
int32_t size);
arc_buf_t *arc_alloc_compressed_buf(spa_t *spa, void *tag,
uint64_t psize, uint64_t lsize, enum zio_compress compression_type,
uint8_t complevel);
arc_buf_t *arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj,
boolean_t byteorder, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac, dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel);
uint8_t arc_get_complevel(arc_buf_t *buf);
arc_buf_t *arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size);
arc_buf_t *arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel);
arc_buf_t *arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel);
void arc_return_buf(arc_buf_t *buf, void *tag);
void arc_loan_inuse_buf(arc_buf_t *buf, void *tag);
void arc_buf_destroy(arc_buf_t *buf, void *tag);
void arc_buf_info(arc_buf_t *buf, arc_buf_info_t *abi, int state_index);
uint64_t arc_buf_size(arc_buf_t *buf);
uint64_t arc_buf_lsize(arc_buf_t *buf);
void arc_buf_access(arc_buf_t *buf);
void arc_release(arc_buf_t *buf, void *tag);
int arc_released(arc_buf_t *buf);
void arc_buf_sigsegv(int sig, siginfo_t *si, void *unused);
void arc_buf_freeze(arc_buf_t *buf);
void arc_buf_thaw(arc_buf_t *buf);
#ifdef ZFS_DEBUG
int arc_referenced(arc_buf_t *buf);
#endif
int arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
arc_read_done_func_t *done, void *priv, zio_priority_t priority,
int flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb);
zio_t *arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
arc_write_done_func_t *ready, arc_write_done_func_t *child_ready,
arc_write_done_func_t *physdone, arc_write_done_func_t *done,
void *priv, zio_priority_t priority, int zio_flags,
const zbookmark_phys_t *zb);
arc_prune_t *arc_add_prune_callback(arc_prune_func_t *func, void *priv);
void arc_remove_prune_callback(arc_prune_t *p);
void arc_freed(spa_t *spa, const blkptr_t *bp);
void arc_flush(spa_t *spa, boolean_t retry);
void arc_tempreserve_clear(uint64_t reserve);
int arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg);
uint64_t arc_all_memory(void);
uint64_t arc_default_max(uint64_t min, uint64_t allmem);
uint64_t arc_target_bytes(void);
void arc_set_limits(uint64_t);
void arc_init(void);
void arc_fini(void);
/*
* Level 2 ARC
*/
void l2arc_add_vdev(spa_t *spa, vdev_t *vd);
void l2arc_remove_vdev(vdev_t *vd);
boolean_t l2arc_vdev_present(vdev_t *vd);
void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen);
boolean_t l2arc_range_check_overlap(uint64_t bottom, uint64_t top,
uint64_t check);
void l2arc_init(void);
void l2arc_fini(void);
void l2arc_start(void);
void l2arc_stop(void);
void l2arc_spa_rebuild_start(spa_t *spa);
#ifndef _KERNEL
extern boolean_t arc_watch;
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ARC_H */
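For orientation, a minimal standalone sketch (not part of the diff) of how the HDR_SET_LSIZE/HDR_GET_LSIZE macros above store sizes in SPA_MINBLOCKSIZE (512-byte) units; the struct, constant, and value below are local stand-ins, not the real arc_buf_hdr_t.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MY_MINBLOCKSHIFT 9 /* 512-byte units, mirroring SPA_MINBLOCKSHIFT */

struct my_hdr { uint16_t b_lsize; }; /* stand-in for the real header */

int
main(void)
{
	struct my_hdr hdr;
	uint64_t lsize = 131072; /* 128 KiB, 512-byte aligned */

	assert((lsize & ((1ULL << MY_MINBLOCKSHIFT) - 1)) == 0);
	hdr.b_lsize = (uint16_t)(lsize >> MY_MINBLOCKSHIFT); /* stores 256 */
	printf("stored %u units, recovered %llu bytes\n", hdr.b_lsize,
	    (unsigned long long)hdr.b_lsize << MY_MINBLOCKSHIFT);
	return (0);
}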
diff --git a/sys/contrib/openzfs/include/sys/arc_impl.h b/sys/contrib/openzfs/include/sys/arc_impl.h
index 747100a22068..f99d2911b5df 100644
--- a/sys/contrib/openzfs/include/sys/arc_impl.h
+++ b/sys/contrib/openzfs/include/sys/arc_impl.h
@@ -1,1021 +1,1020 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, Delphix. All rights reserved.
* Copyright (c) 2013, Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2020, George Amanakis. All rights reserved.
*/
#ifndef _SYS_ARC_IMPL_H
#define _SYS_ARC_IMPL_H
#include <sys/arc.h>
#include <sys/zio_crypt.h>
#include <sys/zthr.h>
#include <sys/aggsum.h>
#include <sys/wmsum.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Note that buffers can be in one of 6 states:
* ARC_anon - anonymous (discussed below)
* ARC_mru - recently used, currently cached
* ARC_mru_ghost - recently used, no longer in cache
* ARC_mfu - frequently used, currently cached
* ARC_mfu_ghost - frequently used, no longer in cache
* ARC_l2c_only - exists in L2ARC but not other states
* When there are no active references to the buffer, they are
* linked onto a list in one of these arc states. These are
* the only buffers that can be evicted or deleted. Within each
* state there are multiple lists, one for meta-data and one for
* non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
* etc.) is tracked separately so that it can be managed more
* explicitly: favored over data, limited explicitly.
*
* Anonymous buffers are buffers that are not associated with
* a DVA. These are buffers that hold dirty block copies
* before they are written to stable storage. By definition,
* they are "ref'd" and are considered part of arc_mru
* that cannot be freed. Generally, they will acquire a DVA
* as they are written and migrate onto the arc_mru list.
*
* The ARC_l2c_only state is for buffers that are in the second
* level ARC but no longer in any of the ARC_m* lists. The second
* level ARC itself may also contain buffers that are in any of
* the ARC_m* states - meaning that a buffer can exist in two
* places. The reason for the ARC_l2c_only state is to keep the
* buffer header in the hash table, so that reads that hit the
* second level ARC benefit from these fast lookups.
*/
typedef struct arc_state {
/*
* list of evictable buffers
*/
multilist_t arcs_list[ARC_BUFC_NUMTYPES];
/*
* supports the "dbufs" kstat
*/
arc_state_type_t arcs_state;
/*
* total amount of evictable data in this state
*/
zfs_refcount_t arcs_esize[ARC_BUFC_NUMTYPES] ____cacheline_aligned;
/*
* total amount of data in this state; this includes: evictable,
* non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
*/
zfs_refcount_t arcs_size;
} arc_state_t;
typedef struct arc_callback arc_callback_t;
struct arc_callback {
void *acb_private;
arc_read_done_func_t *acb_done;
arc_buf_t *acb_buf;
boolean_t acb_encrypted;
boolean_t acb_compressed;
boolean_t acb_noauth;
boolean_t acb_nobuf;
zbookmark_phys_t acb_zb;
zio_t *acb_zio_dummy;
zio_t *acb_zio_head;
arc_callback_t *acb_next;
};
typedef struct arc_write_callback arc_write_callback_t;
struct arc_write_callback {
void *awcb_private;
arc_write_done_func_t *awcb_ready;
arc_write_done_func_t *awcb_children_ready;
arc_write_done_func_t *awcb_physdone;
arc_write_done_func_t *awcb_done;
arc_buf_t *awcb_buf;
};
/*
* ARC buffers are separated into multiple structs as a memory saving measure:
* - Common fields struct, always defined, and embedded within it:
* - L2-only fields, always allocated but undefined when not in L2ARC
* - L1-only fields, only allocated when in L1ARC
*
* Buffer in L1 Buffer only in L2
* +------------------------+ +------------------------+
* | arc_buf_hdr_t | | arc_buf_hdr_t |
* | | | |
* | | | |
* | | | |
* +------------------------+ +------------------------+
* | l2arc_buf_hdr_t | | l2arc_buf_hdr_t |
* | (undefined if L1-only) | | |
* +------------------------+ +------------------------+
* | l1arc_buf_hdr_t |
* | |
* | |
* | |
* | |
* +------------------------+
*
* Because it's possible for the L2ARC to become extremely large, we can wind
* up eating a lot of memory in L2ARC buffer headers, so the size of a header
* is minimized by only allocating the fields necessary for an L1-cached buffer
* when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
* l2arc_buf_hdr) are embedded rather than allocated separately to save a couple
* words in pointers. arc_hdr_realloc() is used to switch a header between
* these two allocation states.
*/
typedef struct l1arc_buf_hdr {
kmutex_t b_freeze_lock;
zio_cksum_t *b_freeze_cksum;
arc_buf_t *b_buf;
uint32_t b_bufcnt;
/* for waiting on writes to complete */
kcondvar_t b_cv;
uint8_t b_byteswap;
/* protected by arc state mutex */
arc_state_t *b_state;
multilist_node_t b_arc_node;
/* updated atomically */
clock_t b_arc_access;
uint32_t b_mru_hits;
uint32_t b_mru_ghost_hits;
uint32_t b_mfu_hits;
uint32_t b_mfu_ghost_hits;
uint32_t b_l2_hits;
/* self protecting */
zfs_refcount_t b_refcnt;
arc_callback_t *b_acb;
abd_t *b_pabd;
} l1arc_buf_hdr_t;
typedef enum l2arc_dev_hdr_flags_t {
L2ARC_DEV_HDR_EVICT_FIRST = (1 << 0) /* mirror of l2ad_first */
} l2arc_dev_hdr_flags_t;
/*
* Pointer used in persistent L2ARC (for pointing to log blocks).
*/
typedef struct l2arc_log_blkptr {
/*
* Offset of log block within the device, in bytes
*/
uint64_t lbp_daddr;
/*
* Aligned payload size (in bytes) of the log block
*/
uint64_t lbp_payload_asize;
/*
* Offset in bytes of the first buffer in the payload
*/
uint64_t lbp_payload_start;
/*
* lbp_prop has the following format:
* * logical size (in bytes)
* * aligned (after compression) size (in bytes)
* * compression algorithm (we always LZ4-compress l2arc logs)
* * checksum algorithm (used for lbp_cksum)
*/
uint64_t lbp_prop;
zio_cksum_t lbp_cksum; /* checksum of log */
} l2arc_log_blkptr_t;
/*
* The persistent L2ARC device header.
* Byte order of magic determines whether 64-bit bswap of fields is necessary.
*/
typedef struct l2arc_dev_hdr_phys {
uint64_t dh_magic; /* L2ARC_DEV_HDR_MAGIC */
uint64_t dh_version; /* Persistent L2ARC version */
/*
* Global L2ARC device state and metadata.
*/
uint64_t dh_spa_guid;
uint64_t dh_vdev_guid;
uint64_t dh_log_entries; /* mirror of l2ad_log_entries */
uint64_t dh_evict; /* evicted offset in bytes */
uint64_t dh_flags; /* l2arc_dev_hdr_flags_t */
/*
* Used in zdb.c for determining if a log block is valid, in the same
* way that l2arc_rebuild() does.
*/
uint64_t dh_start; /* mirror of l2ad_start */
uint64_t dh_end; /* mirror of l2ad_end */
/*
* Start of log block chain. [0] -> newest log, [1] -> one older (used
* for initiating prefetch).
*/
l2arc_log_blkptr_t dh_start_lbps[2];
/*
* Aligned size of all log blocks as accounted by vdev_space_update().
*/
uint64_t dh_lb_asize; /* mirror of l2ad_lb_asize */
uint64_t dh_lb_count; /* mirror of l2ad_lb_count */
/*
* Mirrors of vdev_trim_action_time and vdev_trim_state, used to
* display when the cache device was fully trimmed for the last
* time.
*/
uint64_t dh_trim_action_time;
uint64_t dh_trim_state;
const uint64_t dh_pad[30]; /* pad to 512 bytes */
zio_eck_t dh_tail;
} l2arc_dev_hdr_phys_t;
CTASSERT_GLOBAL(sizeof (l2arc_dev_hdr_phys_t) == SPA_MINBLOCKSIZE);
/*
* A single ARC buffer header entry in a l2arc_log_blk_phys_t.
*/
typedef struct l2arc_log_ent_phys {
dva_t le_dva; /* dva of buffer */
uint64_t le_birth; /* birth txg of buffer */
/*
* le_prop has the following format:
* * logical size (in bytes)
* * physical (compressed) size (in bytes)
* * compression algorithm
* * object type (used to restore arc_buf_contents_t)
* * protected status (used for encryption)
* * prefetch status (used in l2arc_read_done())
*/
uint64_t le_prop;
uint64_t le_daddr; /* buf location on l2dev */
uint64_t le_complevel;
/*
* We pad the size of each entry to a power of 2 so that the size of
* l2arc_log_blk_phys_t is power-of-2 aligned with SPA_MINBLOCKSHIFT,
* because of the L2BLK_SET_*SIZE macros.
*/
const uint64_t le_pad[2]; /* pad to 64 bytes */
} l2arc_log_ent_phys_t;
#define L2ARC_LOG_BLK_MAX_ENTRIES (1022)
/*
* A log block of up to 1022 ARC buffer log entries, chained into the
* persistent L2ARC metadata linked list. Byte order of magic determines
* whether 64-bit bswap of fields is necessary.
*/
typedef struct l2arc_log_blk_phys {
uint64_t lb_magic; /* L2ARC_LOG_BLK_MAGIC */
/*
* There are 2 chains (headed by dh_start_lbps[2]), and this field
* points back to the previous block in this chain. We alternate
* which chain we append to, so they are time-wise and offset-wise
* interleaved, but that is an optimization rather than for
* correctness.
*/
l2arc_log_blkptr_t lb_prev_lbp; /* pointer to prev log block */
/*
* Pad header section to 128 bytes
*/
uint64_t lb_pad[7];
/* Payload */
l2arc_log_ent_phys_t lb_entries[L2ARC_LOG_BLK_MAX_ENTRIES];
} l2arc_log_blk_phys_t; /* 64K total */
/*
* The size of l2arc_log_blk_phys_t has to be power-of-2 aligned with
* SPA_MINBLOCKSHIFT because of L2BLK_SET_*SIZE macros.
*/
CTASSERT_GLOBAL(IS_P2ALIGNED(sizeof (l2arc_log_blk_phys_t),
1ULL << SPA_MINBLOCKSHIFT));
CTASSERT_GLOBAL(sizeof (l2arc_log_blk_phys_t) >= SPA_MINBLOCKSIZE);
CTASSERT_GLOBAL(sizeof (l2arc_log_blk_phys_t) <= SPA_MAXBLOCKSIZE);
/*
* These structures hold in-flight abd buffers for log blocks as they're being
* written to the L2ARC device.
*/
typedef struct l2arc_lb_abd_buf {
abd_t *abd;
list_node_t node;
} l2arc_lb_abd_buf_t;
/*
* These structures hold pointers to log blocks present on the L2ARC device.
*/
typedef struct l2arc_lb_ptr_buf {
l2arc_log_blkptr_t *lb_ptr;
list_node_t node;
} l2arc_lb_ptr_buf_t;
/* Macros for setting fields in le_prop and lbp_prop */
#define L2BLK_GET_LSIZE(field) \
BF64_GET_SB((field), 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1)
#define L2BLK_SET_LSIZE(field, x) \
BF64_SET_SB((field), 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x)
#define L2BLK_GET_PSIZE(field) \
BF64_GET_SB((field), 16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1)
#define L2BLK_SET_PSIZE(field, x) \
BF64_SET_SB((field), 16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1, x)
#define L2BLK_GET_COMPRESS(field) \
BF64_GET((field), 32, SPA_COMPRESSBITS)
#define L2BLK_SET_COMPRESS(field, x) \
BF64_SET((field), 32, SPA_COMPRESSBITS, x)
#define L2BLK_GET_PREFETCH(field) BF64_GET((field), 39, 1)
#define L2BLK_SET_PREFETCH(field, x) BF64_SET((field), 39, 1, x)
#define L2BLK_GET_CHECKSUM(field) BF64_GET((field), 40, 8)
#define L2BLK_SET_CHECKSUM(field, x) BF64_SET((field), 40, 8, x)
#define L2BLK_GET_TYPE(field) BF64_GET((field), 48, 8)
#define L2BLK_SET_TYPE(field, x) BF64_SET((field), 48, 8, x)
#define L2BLK_GET_PROTECTED(field) BF64_GET((field), 56, 1)
#define L2BLK_SET_PROTECTED(field, x) BF64_SET((field), 56, 1, x)
#define L2BLK_GET_STATE(field) BF64_GET((field), 57, 4)
#define L2BLK_SET_STATE(field, x) BF64_SET((field), 57, 4, x)
#define PTR_SWAP(x, y) \
do { \
void *tmp = (x);\
x = y; \
y = tmp; \
- _NOTE(CONSTCOND)\
} while (0)
#define L2ARC_DEV_HDR_MAGIC 0x5a46534341434845LLU /* ASCII: "ZFSCACHE" */
#define L2ARC_LOG_BLK_MAGIC 0x4c4f47424c4b4844LLU /* ASCII: "LOGBLKHD" */
/*
* L2ARC Internals
*/
typedef struct l2arc_dev {
vdev_t *l2ad_vdev; /* vdev */
spa_t *l2ad_spa; /* spa */
uint64_t l2ad_hand; /* next write location */
uint64_t l2ad_start; /* first addr on device */
uint64_t l2ad_end; /* last addr on device */
boolean_t l2ad_first; /* first sweep through */
boolean_t l2ad_writing; /* currently writing */
kmutex_t l2ad_mtx; /* lock for buffer list */
list_t l2ad_buflist; /* buffer list */
list_node_t l2ad_node; /* device list node */
zfs_refcount_t l2ad_alloc; /* allocated bytes */
/*
* Persistence-related stuff
*/
l2arc_dev_hdr_phys_t *l2ad_dev_hdr; /* persistent device header */
uint64_t l2ad_dev_hdr_asize; /* aligned hdr size */
l2arc_log_blk_phys_t l2ad_log_blk; /* currently open log block */
int l2ad_log_ent_idx; /* index into cur log blk */
/* Number of bytes in current log block's payload */
uint64_t l2ad_log_blk_payload_asize;
/*
* Offset (in bytes) of the first buffer in current log block's
* payload.
*/
uint64_t l2ad_log_blk_payload_start;
/* Flag indicating whether a rebuild is scheduled or is going on */
boolean_t l2ad_rebuild;
boolean_t l2ad_rebuild_cancel;
boolean_t l2ad_rebuild_began;
uint64_t l2ad_log_entries; /* entries per log blk */
uint64_t l2ad_evict; /* evicted offset in bytes */
/* List of pointers to log blocks present in the L2ARC device */
list_t l2ad_lbptr_list;
/*
* Aligned size of all log blocks as accounted by vdev_space_update().
*/
zfs_refcount_t l2ad_lb_asize;
/*
* Number of log blocks present on the device.
*/
zfs_refcount_t l2ad_lb_count;
boolean_t l2ad_trim_all; /* TRIM whole device */
} l2arc_dev_t;
/*
* Encrypted blocks will need to be stored encrypted on the L2ARC
* disk as they appear in the main pool. In order for this to work we
* need to pass around the encryption parameters so they can be used
* to write data to the L2ARC. This struct is only defined in the
* arc_buf_hdr_t if the L1 header is defined and has the ARC_FLAG_ENCRYPTED
* flag set.
*/
typedef struct arc_buf_hdr_crypt {
abd_t *b_rabd; /* raw encrypted data */
dmu_object_type_t b_ot; /* object type */
uint32_t b_ebufcnt; /* count of encrypted buffers */
/* dsobj for looking up encryption key for l2arc encryption */
uint64_t b_dsobj;
/* encryption parameters */
uint8_t b_salt[ZIO_DATA_SALT_LEN];
uint8_t b_iv[ZIO_DATA_IV_LEN];
/*
* Technically this could be removed since we will always be able to
* get the mac from the bp when we need it. However, it is inconvenient
* for callers of arc code to have to pass a bp in all the time. This
* also allows us to assert that L2ARC data is properly encrypted to
* match the data in the main storage pool.
*/
uint8_t b_mac[ZIO_DATA_MAC_LEN];
} arc_buf_hdr_crypt_t;
typedef struct l2arc_buf_hdr {
/* protected by arc_buf_hdr mutex */
l2arc_dev_t *b_dev; /* L2ARC device */
uint64_t b_daddr; /* disk address, offset byte */
uint32_t b_hits;
arc_state_type_t b_arcs_state;
list_node_t b_l2node;
} l2arc_buf_hdr_t;
typedef struct l2arc_write_callback {
l2arc_dev_t *l2wcb_dev; /* device info */
arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
/* in-flight list of log blocks */
list_t l2wcb_abd_list;
} l2arc_write_callback_t;
struct arc_buf_hdr {
/* protected by hash lock */
dva_t b_dva;
uint64_t b_birth;
arc_buf_contents_t b_type;
uint8_t b_complevel;
uint8_t b_reserved1; /* used for 4 byte alignment */
uint16_t b_reserved2; /* used for 4 byte alignment */
arc_buf_hdr_t *b_hash_next;
arc_flags_t b_flags;
/*
* This field stores the size of the data buffer after
* compression, and is set in the arc's zio completion handlers.
* It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes).
*
* While the block pointers can store up to 32MB in their psize
* field, we can only store up to 32MB minus 512B. This is due
* to the bp using a bias of 1, whereas we use a bias of 0 (i.e.
* a field of zeros represents 512B in the bp). We can't use a
* bias of 1 since we need to reserve a psize of zero, here, to
* represent holes and embedded blocks.
*
* This isn't a problem in practice, since the maximum size of a
* buffer is limited to 16MB, so we never need to store 32MB in
* this field. Even in the upstream illumos code base, the
* maximum size of a buffer is limited to 16MB.
*/
uint16_t b_psize;
/*
* This field stores the size of the data buffer before
* compression, and cannot change once set. It is in units
* of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes)
*/
uint16_t b_lsize; /* immutable */
uint64_t b_spa; /* immutable */
/* L2ARC fields. Undefined when not in L2ARC. */
l2arc_buf_hdr_t b_l2hdr;
/* L1ARC fields. Undefined when in l2arc_only state */
l1arc_buf_hdr_t b_l1hdr;
/*
* Encryption parameters. Defined only when ARC_FLAG_ENCRYPTED
* is set and the L1 header exists.
*/
arc_buf_hdr_crypt_t b_crypt_hdr;
};
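/*
 * Worked example for the b_psize comment above (illustrative figures,
 * not taken from the diff): with a 16-bit field in 512-byte units and a
 * bias of 0, the largest representable size is 0xffff << 9 = 33553920
 * bytes (32 MiB - 512 B), whereas the bp's bias-of-1 encoding reaches
 * 0x10000 << 9 = 33554432 bytes (a full 32 MiB).
 */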
typedef struct arc_stats {
kstat_named_t arcstat_hits;
kstat_named_t arcstat_misses;
kstat_named_t arcstat_demand_data_hits;
kstat_named_t arcstat_demand_data_misses;
kstat_named_t arcstat_demand_metadata_hits;
kstat_named_t arcstat_demand_metadata_misses;
kstat_named_t arcstat_prefetch_data_hits;
kstat_named_t arcstat_prefetch_data_misses;
kstat_named_t arcstat_prefetch_metadata_hits;
kstat_named_t arcstat_prefetch_metadata_misses;
kstat_named_t arcstat_mru_hits;
kstat_named_t arcstat_mru_ghost_hits;
kstat_named_t arcstat_mfu_hits;
kstat_named_t arcstat_mfu_ghost_hits;
kstat_named_t arcstat_deleted;
/*
* Number of buffers that could not be evicted because the hash lock
* was held by another thread. The lock may not necessarily be held
* by something using the same buffer, since hash locks are shared
* by multiple buffers.
*/
kstat_named_t arcstat_mutex_miss;
/*
* Number of buffers skipped when updating the access state due to the
* header having already been released after acquiring the hash lock.
*/
kstat_named_t arcstat_access_skip;
/*
* Number of buffers skipped because they have I/O in progress, are
* indirect prefetch buffers that have not lived long enough, or are
* not from the spa we're trying to evict from.
*/
kstat_named_t arcstat_evict_skip;
/*
* Number of times arc_evict_state() was unable to evict enough
* buffers to reach its target amount.
*/
kstat_named_t arcstat_evict_not_enough;
kstat_named_t arcstat_evict_l2_cached;
kstat_named_t arcstat_evict_l2_eligible;
kstat_named_t arcstat_evict_l2_eligible_mfu;
kstat_named_t arcstat_evict_l2_eligible_mru;
kstat_named_t arcstat_evict_l2_ineligible;
kstat_named_t arcstat_evict_l2_skip;
kstat_named_t arcstat_hash_elements;
kstat_named_t arcstat_hash_elements_max;
kstat_named_t arcstat_hash_collisions;
kstat_named_t arcstat_hash_chains;
kstat_named_t arcstat_hash_chain_max;
kstat_named_t arcstat_p;
kstat_named_t arcstat_c;
kstat_named_t arcstat_c_min;
kstat_named_t arcstat_c_max;
kstat_named_t arcstat_size;
/*
* Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
* Note that the compressed bytes may match the uncompressed bytes
* if the block is either not compressed or compressed arc is disabled.
*/
kstat_named_t arcstat_compressed_size;
/*
* Uncompressed size of the data stored in b_pabd. If compressed
* arc is disabled then this value will be identical to the stat
* above.
*/
kstat_named_t arcstat_uncompressed_size;
/*
* Number of bytes stored in all the arc_buf_t's. This is classified
* as "overhead" since this data is typically short-lived and will
* be evicted from the arc when it becomes unreferenced unless the
* zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
* values have been set (see comment in dbuf.c for more information).
*/
kstat_named_t arcstat_overhead_size;
/*
* Number of bytes consumed by internal ARC structures necessary
* for tracking purposes; these structures are not actually
* backed by ARC buffers. This includes arc_buf_hdr_t structures
* (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
* caches), and arc_buf_t structures (allocated via arc_buf_t
* cache).
*/
kstat_named_t arcstat_hdr_size;
/*
* Number of bytes consumed by ARC buffers of type equal to
* ARC_BUFC_DATA. This is generally consumed by buffers backing
* on disk user data (e.g. plain file contents).
*/
kstat_named_t arcstat_data_size;
/*
* Number of bytes consumed by ARC buffers of type equal to
* ARC_BUFC_METADATA. This is generally consumed by buffers
* backing on disk data that is used for internal ZFS
* structures (e.g. ZAP, dnode, indirect blocks, etc).
*/
kstat_named_t arcstat_metadata_size;
/*
* Number of bytes consumed by dmu_buf_impl_t objects.
*/
kstat_named_t arcstat_dbuf_size;
/*
* Number of bytes consumed by dnode_t objects.
*/
kstat_named_t arcstat_dnode_size;
/*
* Number of bytes consumed by bonus buffers.
*/
kstat_named_t arcstat_bonus_size;
#if defined(COMPAT_FREEBSD11)
/*
* Sum of the previous three counters, provided for compatibility.
*/
kstat_named_t arcstat_other_size;
#endif
/*
* Total number of bytes consumed by ARC buffers residing in the
* arc_anon state. This includes *all* buffers in the arc_anon
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
*/
kstat_named_t arcstat_anon_size;
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_DATA,
* residing in the arc_anon state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
kstat_named_t arcstat_anon_evictable_data;
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_METADATA,
* residing in the arc_anon state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
kstat_named_t arcstat_anon_evictable_metadata;
/*
* Total number of bytes consumed by ARC buffers residing in the
* arc_mru state. This includes *all* buffers in the arc_mru
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
*/
kstat_named_t arcstat_mru_size;
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_DATA,
* residing in the arc_mru state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
kstat_named_t arcstat_mru_evictable_data;
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_METADATA,
* residing in the arc_mru state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
kstat_named_t arcstat_mru_evictable_metadata;
/*
* Total number of bytes that *would have been* consumed by ARC
* buffers in the arc_mru_ghost state. The key thing to note
* here is that this size doesn't actually indicate
* RAM consumption. The ghost lists only consist of headers and
* don't actually have ARC buffers linked off of these headers.
* Thus, *if* the headers had associated ARC buffers, these
* buffers *would have* consumed this number of bytes.
*/
kstat_named_t arcstat_mru_ghost_size;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
*/
kstat_named_t arcstat_mru_ghost_evictable_data;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
*/
kstat_named_t arcstat_mru_ghost_evictable_metadata;
/*
* Total number of bytes consumed by ARC buffers residing in the
* arc_mfu state. This includes *all* buffers in the arc_mfu
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
*/
kstat_named_t arcstat_mfu_size;
/*
* Number of bytes consumed by ARC buffers that are eligible for
* eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
* state.
*/
kstat_named_t arcstat_mfu_evictable_data;
/*
* Number of bytes consumed by ARC buffers that are eligible for
* eviction, of type ARC_BUFC_METADATA, and reside in the
* arc_mfu state.
*/
kstat_named_t arcstat_mfu_evictable_metadata;
/*
* Total number of bytes that *would have been* consumed by ARC
* buffers in the arc_mfu_ghost state. See the comment above
* arcstat_mru_ghost_size for more details.
*/
kstat_named_t arcstat_mfu_ghost_size;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
*/
kstat_named_t arcstat_mfu_ghost_evictable_data;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
*/
kstat_named_t arcstat_mfu_ghost_evictable_metadata;
kstat_named_t arcstat_l2_hits;
kstat_named_t arcstat_l2_misses;
/*
* Allocated size (in bytes) of L2ARC cached buffers by ARC state.
*/
kstat_named_t arcstat_l2_prefetch_asize;
kstat_named_t arcstat_l2_mru_asize;
kstat_named_t arcstat_l2_mfu_asize;
/*
* Allocated size (in bytes) of L2ARC cached buffers by buffer content
* type.
*/
kstat_named_t arcstat_l2_bufc_data_asize;
kstat_named_t arcstat_l2_bufc_metadata_asize;
kstat_named_t arcstat_l2_feeds;
kstat_named_t arcstat_l2_rw_clash;
kstat_named_t arcstat_l2_read_bytes;
kstat_named_t arcstat_l2_write_bytes;
kstat_named_t arcstat_l2_writes_sent;
kstat_named_t arcstat_l2_writes_done;
kstat_named_t arcstat_l2_writes_error;
kstat_named_t arcstat_l2_writes_lock_retry;
kstat_named_t arcstat_l2_evict_lock_retry;
kstat_named_t arcstat_l2_evict_reading;
kstat_named_t arcstat_l2_evict_l1cached;
kstat_named_t arcstat_l2_free_on_write;
kstat_named_t arcstat_l2_abort_lowmem;
kstat_named_t arcstat_l2_cksum_bad;
kstat_named_t arcstat_l2_io_error;
kstat_named_t arcstat_l2_lsize;
kstat_named_t arcstat_l2_psize;
kstat_named_t arcstat_l2_hdr_size;
/*
* Number of L2ARC log blocks written. These are used for restoring the
* L2ARC. Updated during writing of L2ARC log blocks.
*/
kstat_named_t arcstat_l2_log_blk_writes;
/*
* Moving average of the aligned size of the L2ARC log blocks, in
* bytes. Updated during L2ARC rebuild and during writing of L2ARC
* log blocks.
*/
kstat_named_t arcstat_l2_log_blk_avg_asize;
/* Aligned size of L2ARC log blocks on L2ARC devices. */
kstat_named_t arcstat_l2_log_blk_asize;
/* Number of L2ARC log blocks present on L2ARC devices. */
kstat_named_t arcstat_l2_log_blk_count;
/*
* Moving average of the ratio of the aligned size of L2ARC restored
* data to the aligned size of its metadata in L2ARC.
* Updated during L2ARC rebuild and during writing of L2ARC log blocks.
*/
kstat_named_t arcstat_l2_data_to_meta_ratio;
/*
* Number of times the L2ARC rebuild was successful for an L2ARC device.
*/
kstat_named_t arcstat_l2_rebuild_success;
/*
* Number of times the L2ARC rebuild failed because the device header
* was in an unsupported format or corrupted.
*/
kstat_named_t arcstat_l2_rebuild_abort_unsupported;
/*
* Number of times the L2ARC rebuild failed because of IO errors
* while reading a log block.
*/
kstat_named_t arcstat_l2_rebuild_abort_io_errors;
/*
* Number of times the L2ARC rebuild failed because of IO errors when
* reading the device header.
*/
kstat_named_t arcstat_l2_rebuild_abort_dh_errors;
/*
* Number of L2ARC log blocks which failed to be restored due to
* checksum errors.
*/
kstat_named_t arcstat_l2_rebuild_abort_cksum_lb_errors;
/*
* Number of times the L2ARC rebuild was aborted due to low system
* memory.
*/
kstat_named_t arcstat_l2_rebuild_abort_lowmem;
/* Logical size of L2ARC restored data, in bytes. */
kstat_named_t arcstat_l2_rebuild_size;
/* Aligned size of L2ARC restored data, in bytes. */
kstat_named_t arcstat_l2_rebuild_asize;
/*
* Number of L2ARC log entries (buffers) that were successfully
* restored in ARC.
*/
kstat_named_t arcstat_l2_rebuild_bufs;
/*
* Number of L2ARC log entries (buffers) already cached in ARC. These
* were not restored again.
*/
kstat_named_t arcstat_l2_rebuild_bufs_precached;
/*
* Number of L2ARC log blocks that were restored successfully. Each
* log block may hold up to L2ARC_LOG_BLK_MAX_ENTRIES buffers.
*/
kstat_named_t arcstat_l2_rebuild_log_blks;
kstat_named_t arcstat_memory_throttle_count;
kstat_named_t arcstat_memory_direct_count;
kstat_named_t arcstat_memory_indirect_count;
kstat_named_t arcstat_memory_all_bytes;
kstat_named_t arcstat_memory_free_bytes;
kstat_named_t arcstat_memory_available_bytes;
kstat_named_t arcstat_no_grow;
kstat_named_t arcstat_tempreserve;
kstat_named_t arcstat_loaned_bytes;
kstat_named_t arcstat_prune;
kstat_named_t arcstat_meta_used;
kstat_named_t arcstat_meta_limit;
kstat_named_t arcstat_dnode_limit;
kstat_named_t arcstat_meta_max;
kstat_named_t arcstat_meta_min;
kstat_named_t arcstat_async_upgrade_sync;
kstat_named_t arcstat_demand_hit_predictive_prefetch;
kstat_named_t arcstat_demand_hit_prescient_prefetch;
kstat_named_t arcstat_need_free;
kstat_named_t arcstat_sys_free;
kstat_named_t arcstat_raw_size;
kstat_named_t arcstat_cached_only_in_progress;
kstat_named_t arcstat_abd_chunk_waste_size;
} arc_stats_t;
typedef struct arc_sums {
wmsum_t arcstat_hits;
wmsum_t arcstat_misses;
wmsum_t arcstat_demand_data_hits;
wmsum_t arcstat_demand_data_misses;
wmsum_t arcstat_demand_metadata_hits;
wmsum_t arcstat_demand_metadata_misses;
wmsum_t arcstat_prefetch_data_hits;
wmsum_t arcstat_prefetch_data_misses;
wmsum_t arcstat_prefetch_metadata_hits;
wmsum_t arcstat_prefetch_metadata_misses;
wmsum_t arcstat_mru_hits;
wmsum_t arcstat_mru_ghost_hits;
wmsum_t arcstat_mfu_hits;
wmsum_t arcstat_mfu_ghost_hits;
wmsum_t arcstat_deleted;
wmsum_t arcstat_mutex_miss;
wmsum_t arcstat_access_skip;
wmsum_t arcstat_evict_skip;
wmsum_t arcstat_evict_not_enough;
wmsum_t arcstat_evict_l2_cached;
wmsum_t arcstat_evict_l2_eligible;
wmsum_t arcstat_evict_l2_eligible_mfu;
wmsum_t arcstat_evict_l2_eligible_mru;
wmsum_t arcstat_evict_l2_ineligible;
wmsum_t arcstat_evict_l2_skip;
wmsum_t arcstat_hash_collisions;
wmsum_t arcstat_hash_chains;
aggsum_t arcstat_size;
wmsum_t arcstat_compressed_size;
wmsum_t arcstat_uncompressed_size;
wmsum_t arcstat_overhead_size;
wmsum_t arcstat_hdr_size;
wmsum_t arcstat_data_size;
wmsum_t arcstat_metadata_size;
wmsum_t arcstat_dbuf_size;
aggsum_t arcstat_dnode_size;
wmsum_t arcstat_bonus_size;
wmsum_t arcstat_l2_hits;
wmsum_t arcstat_l2_misses;
wmsum_t arcstat_l2_prefetch_asize;
wmsum_t arcstat_l2_mru_asize;
wmsum_t arcstat_l2_mfu_asize;
wmsum_t arcstat_l2_bufc_data_asize;
wmsum_t arcstat_l2_bufc_metadata_asize;
wmsum_t arcstat_l2_feeds;
wmsum_t arcstat_l2_rw_clash;
wmsum_t arcstat_l2_read_bytes;
wmsum_t arcstat_l2_write_bytes;
wmsum_t arcstat_l2_writes_sent;
wmsum_t arcstat_l2_writes_done;
wmsum_t arcstat_l2_writes_error;
wmsum_t arcstat_l2_writes_lock_retry;
wmsum_t arcstat_l2_evict_lock_retry;
wmsum_t arcstat_l2_evict_reading;
wmsum_t arcstat_l2_evict_l1cached;
wmsum_t arcstat_l2_free_on_write;
wmsum_t arcstat_l2_abort_lowmem;
wmsum_t arcstat_l2_cksum_bad;
wmsum_t arcstat_l2_io_error;
wmsum_t arcstat_l2_lsize;
wmsum_t arcstat_l2_psize;
aggsum_t arcstat_l2_hdr_size;
wmsum_t arcstat_l2_log_blk_writes;
wmsum_t arcstat_l2_log_blk_asize;
wmsum_t arcstat_l2_log_blk_count;
wmsum_t arcstat_l2_rebuild_success;
wmsum_t arcstat_l2_rebuild_abort_unsupported;
wmsum_t arcstat_l2_rebuild_abort_io_errors;
wmsum_t arcstat_l2_rebuild_abort_dh_errors;
wmsum_t arcstat_l2_rebuild_abort_cksum_lb_errors;
wmsum_t arcstat_l2_rebuild_abort_lowmem;
wmsum_t arcstat_l2_rebuild_size;
wmsum_t arcstat_l2_rebuild_asize;
wmsum_t arcstat_l2_rebuild_bufs;
wmsum_t arcstat_l2_rebuild_bufs_precached;
wmsum_t arcstat_l2_rebuild_log_blks;
wmsum_t arcstat_memory_throttle_count;
wmsum_t arcstat_memory_direct_count;
wmsum_t arcstat_memory_indirect_count;
wmsum_t arcstat_prune;
aggsum_t arcstat_meta_used;
wmsum_t arcstat_async_upgrade_sync;
wmsum_t arcstat_demand_hit_predictive_prefetch;
wmsum_t arcstat_demand_hit_prescient_prefetch;
wmsum_t arcstat_raw_size;
wmsum_t arcstat_cached_only_in_progress;
wmsum_t arcstat_abd_chunk_waste_size;
} arc_sums_t;
typedef struct arc_evict_waiter {
list_node_t aew_node;
kcondvar_t aew_cv;
uint64_t aew_count;
} arc_evict_waiter_t;
#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
#define ARCSTAT_INCR(stat, val) \
wmsum_add(&arc_sums.stat, (val))
#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
#define arc_no_grow ARCSTAT(arcstat_no_grow) /* do not grow cache size */
#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
#define arc_c ARCSTAT(arcstat_c) /* target size of cache */
#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
#define arc_sys_free ARCSTAT(arcstat_sys_free) /* target system free bytes */
#define arc_anon (&ARC_anon)
#define arc_mru (&ARC_mru)
#define arc_mru_ghost (&ARC_mru_ghost)
#define arc_mfu (&ARC_mfu)
#define arc_mfu_ghost (&ARC_mfu_ghost)
#define arc_l2c_only (&ARC_l2c_only)
extern taskq_t *arc_prune_taskq;
extern arc_stats_t arc_stats;
extern arc_sums_t arc_sums;
extern hrtime_t arc_growtime;
extern boolean_t arc_warm;
extern int arc_grow_retry;
extern int arc_no_grow_shift;
extern int arc_shrink_shift;
extern kmutex_t arc_prune_mtx;
extern list_t arc_prune_list;
extern arc_state_t ARC_mfu;
extern arc_state_t ARC_mru;
extern uint_t zfs_arc_pc_percent;
extern int arc_lotsfree_percent;
extern unsigned long zfs_arc_min;
extern unsigned long zfs_arc_max;
extern void arc_reduce_target_size(int64_t to_free);
extern boolean_t arc_reclaim_needed(void);
extern void arc_kmem_reap_soon(void);
extern void arc_wait_for_eviction(uint64_t);
extern void arc_lowmem_init(void);
extern void arc_lowmem_fini(void);
extern void arc_prune_async(int64_t);
extern int arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg);
extern uint64_t arc_free_memory(void);
extern int64_t arc_available_memory(void);
extern void arc_tuning_update(boolean_t);
extern void arc_register_hotplug(void);
extern void arc_unregister_hotplug(void);
extern int param_set_arc_long(ZFS_MODULE_PARAM_ARGS);
extern int param_set_arc_int(ZFS_MODULE_PARAM_ARGS);
/* used in zdb.c */
boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *lbp);
/* used in vdev_trim.c */
void l2arc_dev_hdr_update(l2arc_dev_t *dev);
l2arc_dev_t *l2arc_vdev_get(vdev_t *vd);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ARC_IMPL_H */
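As an aside, a minimal standalone sketch (not part of the diff) of the le_prop/lbp_prop bit layout handled by the L2BLK_* macros above, written with plain shifts instead of the BF64_* helpers; the sizes and the compression id below are made-up example values.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t prop = 0;
	uint64_t lsize = 131072, psize = 8192; /* example sizes, in bytes */

	prop |= ((lsize >> 9) - 1) << 0;  /* logical size, 512 B units, bias 1 */
	prop |= ((psize >> 9) - 1) << 16; /* physical size, 512 B units, bias 1 */
	prop |= (uint64_t)15 << 32;       /* compression id (example value) */
	prop |= (uint64_t)1 << 39;        /* prefetch bit */

	printf("lsize=%llu psize=%llu compress=%llu prefetch=%llu\n",
	    (unsigned long long)((((prop >> 0) & 0xffff) + 1) << 9),
	    (unsigned long long)((((prop >> 16) & 0xffff) + 1) << 9),
	    (unsigned long long)((prop >> 32) & 0x7f),
	    (unsigned long long)((prop >> 39) & 1));
	return (0);
}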
diff --git a/sys/contrib/openzfs/include/sys/bitops.h b/sys/contrib/openzfs/include/sys/bitops.h
index 56d52073bcc8..69d07d76552a 100644
--- a/sys/contrib/openzfs/include/sys/bitops.h
+++ b/sys/contrib/openzfs/include/sys/bitops.h
@@ -1,90 +1,89 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017 Datto Inc.
*/
#ifndef _SYS_BITOPS_H
#define _SYS_BITOPS_H
#include <sys/zfs_context.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* General-purpose 32-bit and 64-bit bitfield encodings.
*/
#define BF32_DECODE(x, low, len) P2PHASE((x) >> (low), 1U << (len))
#define BF64_DECODE(x, low, len) P2PHASE((x) >> (low), 1ULL << (len))
#define BF32_ENCODE(x, low, len) (P2PHASE((x), 1U << (len)) << (low))
#define BF64_ENCODE(x, low, len) (P2PHASE((x), 1ULL << (len)) << (low))
#define BF32_GET(x, low, len) BF32_DECODE(x, low, len)
#define BF64_GET(x, low, len) BF64_DECODE(x, low, len)
#define BF32_SET(x, low, len, val) do { \
ASSERT3U(val, <, 1U << (len)); \
ASSERT3U(low + len, <=, 32); \
(x) ^= BF32_ENCODE((x >> low) ^ (val), low, len); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define BF64_SET(x, low, len, val) do { \
ASSERT3U(val, <, 1ULL << (len)); \
ASSERT3U(low + len, <=, 64); \
((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len)); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define BF32_GET_SB(x, low, len, shift, bias) \
((BF32_GET(x, low, len) + (bias)) << (shift))
#define BF64_GET_SB(x, low, len, shift, bias) \
((BF64_GET(x, low, len) + (bias)) << (shift))
/*
* We use ASSERT3U instead of ASSERT in these macros to prevent a lint error in
* the case where val is a constant. We can't fix ASSERT because it's used as
- * an expression in several places in the kernel; as a result, changing it to
- * the do{} while() syntax to allow us to _NOTE the CONSTCOND is not an option.
+ * an expression in several places in the kernel.
*/
#define BF32_SET_SB(x, low, len, shift, bias, val) do { \
ASSERT3U(IS_P2ALIGNED(val, 1U << shift), !=, B_FALSE); \
ASSERT3S((val) >> (shift), >=, bias); \
BF32_SET(x, low, len, ((val) >> (shift)) - (bias)); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define BF64_SET_SB(x, low, len, shift, bias, val) do { \
ASSERT3U(IS_P2ALIGNED(val, 1ULL << shift), !=, B_FALSE); \
ASSERT3S((val) >> (shift), >=, bias); \
BF64_SET(x, low, len, ((val) >> (shift)) - (bias)); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#ifdef __cplusplus
}
#endif
#endif /* _SYS_BITOPS_H */
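A minimal standalone sketch (not part of the diff) of the BF64_ENCODE/BF64_DECODE round trip defined above, with P2PHASE expanded to a plain power-of-two mask; the MY_ prefix marks local stand-ins.

#include <stdint.h>
#include <stdio.h>

#define MY_BF64_ENCODE(x, low, len) \
	(((x) & ((1ULL << (len)) - 1)) << (low))
#define MY_BF64_DECODE(x, low, len) \
	(((x) >> (low)) & ((1ULL << (len)) - 1))

int
main(void)
{
	uint64_t word = 0;

	/* Store the value 5 in a 3-bit field at bit 10 (XOR update as in BF64_SET). */
	word ^= MY_BF64_ENCODE(MY_BF64_DECODE(word, 10, 3) ^ 5, 10, 3);
	printf("field = %llu\n",
	    (unsigned long long)MY_BF64_DECODE(word, 10, 3)); /* prints 5 */
	return (0);
}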
diff --git a/sys/contrib/openzfs/include/sys/dbuf.h b/sys/contrib/openzfs/include/sys/dbuf.h
index d2c175af649c..89422659d05a 100644
--- a/sys/contrib/openzfs/include/sys/dbuf.h
+++ b/sys/contrib/openzfs/include/sys/dbuf.h
@@ -1,503 +1,503 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DBUF_H
#define _SYS_DBUF_H
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_refcount.h>
#include <sys/zrlock.h>
#include <sys/multilist.h>
#ifdef __cplusplus
extern "C" {
#endif
#define IN_DMU_SYNC 2
/*
* define flags for dbuf_read
*/
#define DB_RF_MUST_SUCCEED (1 << 0)
#define DB_RF_CANFAIL (1 << 1)
#define DB_RF_HAVESTRUCT (1 << 2)
#define DB_RF_NOPREFETCH (1 << 3)
#define DB_RF_NEVERWAIT (1 << 4)
#define DB_RF_CACHED (1 << 5)
#define DB_RF_NO_DECRYPT (1 << 6)
/*
* The simplified state transition diagram for dbufs looks like:
*
* +----> READ ----+
* | |
* | V
* (alloc)-->UNCACHED CACHED-->EVICTING-->(free)
* | ^ ^
* | | |
* +----> FILL ----+ |
* | |
* | |
* +--------> NOFILL -------+
*
* DB_SEARCH is an invalid state for a dbuf. It is used by dbuf_free_range
* to find all dbufs in a range of a dnode and must be less than any other
* dbuf_states_t (see comment on dn_dbufs in dnode.h).
*/
typedef enum dbuf_states {
DB_SEARCH = -1,
DB_UNCACHED,
DB_FILL,
DB_NOFILL,
DB_READ,
DB_CACHED,
DB_EVICTING
} dbuf_states_t;
typedef enum dbuf_cached_state {
DB_NO_CACHE = -1,
DB_DBUF_CACHE,
DB_DBUF_METADATA_CACHE,
DB_CACHE_MAX
} dbuf_cached_state_t;
struct dnode;
struct dmu_tx;
/*
* level = 0 means the user data
* level = 1 means the single indirect block
* etc.
*/
struct dmu_buf_impl;
typedef enum override_states {
DR_NOT_OVERRIDDEN,
DR_IN_DMU_SYNC,
DR_OVERRIDDEN
} override_states_t;
typedef enum db_lock_type {
DLT_NONE,
DLT_PARENT,
DLT_OBJSET
} db_lock_type_t;
typedef struct dbuf_dirty_record {
/* link on our parents dirty list */
list_node_t dr_dirty_node;
/* transaction group this data will sync in */
uint64_t dr_txg;
/* zio of outstanding write IO */
zio_t *dr_zio;
/* pointer back to our dbuf */
struct dmu_buf_impl *dr_dbuf;
/* list link for dbuf dirty records */
list_node_t dr_dbuf_node;
/*
* The dnode we are part of. Note that the dnode can not be moved or
* evicted due to the hold that's added by dnode_setdirty() or
* dmu_objset_sync_dnodes(), and released by dnode_rele_task() or
* userquota_updates_task(). This hold is necessary for
* dirty_lightweight_leaf-type dirty records, which don't have a hold
* on a dbuf.
*/
dnode_t *dr_dnode;
/* pointer to parent dirty record */
struct dbuf_dirty_record *dr_parent;
/* How much space was changed to dsl_pool_dirty_space() for this? */
unsigned int dr_accounted;
/* A copy of the bp that points to us */
blkptr_t dr_bp_copy;
union dirty_types {
struct dirty_indirect {
/* protect access to list */
kmutex_t dr_mtx;
/* Our list of dirty children */
list_t dr_children;
} di;
struct dirty_leaf {
/*
* dr_data is set when we dirty the buffer
* so that we can retain the pointer even if it
* gets COW'd in a subsequent transaction group.
*/
arc_buf_t *dr_data;
blkptr_t dr_overridden_by;
override_states_t dr_override_state;
uint8_t dr_copies;
boolean_t dr_nopwrite;
boolean_t dr_has_raw_params;
/*
* If dr_has_raw_params is set, the following crypt
* params will be set on the BP that's written.
*/
boolean_t dr_byteorder;
uint8_t dr_salt[ZIO_DATA_SALT_LEN];
uint8_t dr_iv[ZIO_DATA_IV_LEN];
uint8_t dr_mac[ZIO_DATA_MAC_LEN];
} dl;
struct dirty_lightweight_leaf {
/*
* This dirty record refers to a leaf (level=0)
* block, whose dbuf has not been instantiated for
* performance reasons.
*/
uint64_t dr_blkid;
abd_t *dr_abd;
zio_prop_t dr_props;
enum zio_flag dr_flags;
} dll;
} dt;
} dbuf_dirty_record_t;
typedef struct dmu_buf_impl {
/*
* The following members are immutable, with the exception of
* db.db_data, which is protected by db_mtx.
*/
/* the publicly visible structure */
dmu_buf_t db;
/* the objset we belong to */
struct objset *db_objset;
/*
* handle to safely access the dnode we belong to (NULL when evicted)
*/
struct dnode_handle *db_dnode_handle;
/*
* our parent buffer; if the dnode points to us directly,
* db_parent == db_dnode_handle->dnh_dnode->dn_dbuf
* only accessed by sync thread ???
* (NULL when evicted)
* May change from NULL to non-NULL under the protection of db_mtx
* (see dbuf_check_blkptr())
*/
struct dmu_buf_impl *db_parent;
/*
* link for hash table of all dmu_buf_impl_t's
*/
struct dmu_buf_impl *db_hash_next;
/*
* Our link on the owner dnode's dn_dbufs list.
* Protected by its dn_dbufs_mtx. Should be on the same cache line
* as db_level and db_blkid for the best avl_add() performance.
*/
avl_node_t db_link;
/* our block number */
uint64_t db_blkid;
/*
* Pointer to the blkptr_t which points to us. May be NULL if we
* don't have one yet. (NULL when evicted)
*/
blkptr_t *db_blkptr;
/*
* Our indirection level. Data buffers have db_level==0.
* Indirect buffers which point to data buffers have
* db_level==1. etc. Buffers which contain dnodes have
* db_level==0, since the dnodes are stored in a file.
*/
uint8_t db_level;
/*
* Protects db_buf's contents if they contain an indirect block or data
* block of the meta-dnode. We use this lock to protect the structure of
* the block tree. This means that when modifying this dbuf's data, we
* grab its rwlock. When modifying its parent's data (including the
* blkptr to this dbuf), we grab the parent's rwlock. The lock ordering
* for this lock is:
* 1) dn_struct_rwlock
* 2) db_rwlock
* We don't currently grab multiple dbufs' db_rwlocks at once.
*/
krwlock_t db_rwlock;
/* buffer holding our data */
arc_buf_t *db_buf;
/* db_mtx protects the members below */
kmutex_t db_mtx;
/*
* Current state of the buffer
*/
dbuf_states_t db_state;
/*
* Refcount accessed by dmu_buf_{hold,rele}.
* If nonzero, the buffer can't be destroyed.
* Protected by db_mtx.
*/
zfs_refcount_t db_holds;
kcondvar_t db_changed;
dbuf_dirty_record_t *db_data_pending;
/* List of dirty records for the buffer sorted newest to oldest. */
list_t db_dirty_records;
/* Link in dbuf_cache or dbuf_metadata_cache */
multilist_node_t db_cache_link;
/* Tells us which dbuf cache this dbuf is in, if any */
dbuf_cached_state_t db_caching_status;
/* Data which is unique to data (leaf) blocks: */
/* User callback information. */
dmu_buf_user_t *db_user;
/*
* Evict user data as soon as the dirty and reference
* counts are equal.
*/
uint8_t db_user_immediate_evict;
/*
* This block was freed while a read or write was
* active.
*/
uint8_t db_freed_in_flight;
/*
* dnode_evict_dbufs() or dnode_evict_bonus() tried to
* evict this dbuf, but couldn't due to outstanding
* references. Evict once the refcount drops to 0.
*/
uint8_t db_pending_evict;
uint8_t db_dirtycnt;
} dmu_buf_impl_t;
/* Note: the dbuf hash table is exposed only for the mdb module */
#define DBUF_MUTEXES 2048
#define DBUF_HASH_MUTEX(h, idx) (&(h)->hash_mutexes[(idx) & (DBUF_MUTEXES-1)])
typedef struct dbuf_hash_table {
uint64_t hash_table_mask;
dmu_buf_impl_t **hash_table;
kmutex_t hash_mutexes[DBUF_MUTEXES] ____cacheline_aligned;
} dbuf_hash_table_t;
typedef void (*dbuf_prefetch_fn)(void *, boolean_t);
uint64_t dbuf_whichblock(const struct dnode *di, const int64_t level,
const uint64_t offset);
void dbuf_create_bonus(struct dnode *dn);
int dbuf_spill_set_blksz(dmu_buf_t *db, uint64_t blksz, dmu_tx_t *tx);
void dbuf_rm_spill(struct dnode *dn, dmu_tx_t *tx);
dmu_buf_impl_t *dbuf_hold(struct dnode *dn, uint64_t blkid, void *tag);
dmu_buf_impl_t *dbuf_hold_level(struct dnode *dn, int level, uint64_t blkid,
void *tag);
int dbuf_hold_impl(struct dnode *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
void *tag, dmu_buf_impl_t **dbp);
int dbuf_prefetch_impl(struct dnode *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
void *arg);
int dbuf_prefetch(struct dnode *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags);
void dbuf_add_ref(dmu_buf_impl_t *db, void *tag);
boolean_t dbuf_try_add_ref(dmu_buf_t *db, objset_t *os, uint64_t obj,
uint64_t blkid, void *tag);
uint64_t dbuf_refcount(dmu_buf_impl_t *db);
void dbuf_rele(dmu_buf_impl_t *db, void *tag);
void dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting);
dmu_buf_impl_t *dbuf_find(struct objset *os, uint64_t object, uint8_t level,
uint64_t blkid);
int dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags);
void dmu_buf_will_not_fill(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx);
void dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx);
dbuf_dirty_record_t *dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
dbuf_dirty_record_t *dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid,
dmu_tx_t *tx);
arc_buf_t *dbuf_loan_arcbuf(dmu_buf_impl_t *db);
void dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
bp_embedded_type_t etype, enum zio_compress comp,
int uncompressed_size, int compressed_size, int byteorder, dmu_tx_t *tx);
int dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
const struct zio_prop *zp, enum zio_flag flags, dmu_tx_t *tx);
void dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx);
void dbuf_destroy(dmu_buf_impl_t *db);
void dbuf_unoverride(dbuf_dirty_record_t *dr);
void dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx);
void dbuf_release_bp(dmu_buf_impl_t *db);
db_lock_type_t dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag);
void dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, void *tag);
void dbuf_free_range(struct dnode *dn, uint64_t start, uint64_t end,
struct dmu_tx *);
void dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx);
void dbuf_stats_init(dbuf_hash_table_t *hash);
void dbuf_stats_destroy(void);
int dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift);
#define DB_DNODE(_db) ((_db)->db_dnode_handle->dnh_dnode)
#define DB_DNODE_LOCK(_db) ((_db)->db_dnode_handle->dnh_zrlock)
#define DB_DNODE_ENTER(_db) (zrl_add(&DB_DNODE_LOCK(_db)))
#define DB_DNODE_EXIT(_db) (zrl_remove(&DB_DNODE_LOCK(_db)))
#define DB_DNODE_HELD(_db) (!zrl_is_zero(&DB_DNODE_LOCK(_db)))
void dbuf_init(void);
void dbuf_fini(void);
boolean_t dbuf_is_metadata(dmu_buf_impl_t *db);
static inline dbuf_dirty_record_t *
dbuf_find_dirty_lte(dmu_buf_impl_t *db, uint64_t txg)
{
dbuf_dirty_record_t *dr;
for (dr = list_head(&db->db_dirty_records);
dr != NULL && dr->dr_txg > txg;
dr = list_next(&db->db_dirty_records, dr))
continue;
return (dr);
}
static inline dbuf_dirty_record_t *
dbuf_find_dirty_eq(dmu_buf_impl_t *db, uint64_t txg)
{
dbuf_dirty_record_t *dr;
dr = dbuf_find_dirty_lte(db, txg);
if (dr && dr->dr_txg == txg)
return (dr);
return (NULL);
}
#define DBUF_GET_BUFC_TYPE(_db) \
(dbuf_is_metadata(_db) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)
#define DBUF_IS_CACHEABLE(_db) \
((_db)->db_objset->os_primary_cache == ZFS_CACHE_ALL || \
(dbuf_is_metadata(_db) && \
((_db)->db_objset->os_primary_cache == ZFS_CACHE_METADATA)))
#define DBUF_IS_L2CACHEABLE(_db) \
((_db)->db_objset->os_secondary_cache == ZFS_CACHE_ALL || \
(dbuf_is_metadata(_db) && \
((_db)->db_objset->os_secondary_cache == ZFS_CACHE_METADATA)))
#define DNODE_LEVEL_IS_L2CACHEABLE(_dn, _level) \
((_dn)->dn_objset->os_secondary_cache == ZFS_CACHE_ALL || \
(((_level) > 0 || \
DMU_OT_IS_METADATA((_dn)->dn_handle->dnh_dnode->dn_type)) && \
((_dn)->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA)))
#ifdef ZFS_DEBUG
/*
* There should be a ## between the string literal and fmt, to make it
* clear that we're joining two strings together, but gcc does not
* support that preprocessor token.
*/
#define dprintf_dbuf(dbuf, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char __db_buf[32]; \
uint64_t __db_obj = (dbuf)->db.db_object; \
if (__db_obj == DMU_META_DNODE_OBJECT) \
(void) strlcpy(__db_buf, "mdn", sizeof (__db_buf)); \
else \
(void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
(u_longlong_t)__db_obj); \
dprintf_ds((dbuf)->db_objset->os_dsl_dataset, \
"obj=%s lvl=%u blkid=%lld " fmt, \
__db_buf, (dbuf)->db_level, \
(u_longlong_t)(dbuf)->db_blkid, __VA_ARGS__); \
} \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define dprintf_dbuf_bp(db, bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, bp); \
dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define DBUF_VERIFY(db) dbuf_verify(db)
#else
#define dprintf_dbuf(db, fmt, ...)
#define dprintf_dbuf_bp(db, bp, fmt, ...)
#define DBUF_VERIFY(db)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DBUF_H */
diff --git a/sys/contrib/openzfs/include/sys/dnode.h b/sys/contrib/openzfs/include/sys/dnode.h
index 2cdc5b8798ad..e7cccd044abf 100644
--- a/sys/contrib/openzfs/include/sys/dnode.h
+++ b/sys/contrib/openzfs/include/sys/dnode.h
@@ -1,627 +1,627 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DNODE_H
#define _SYS_DNODE_H
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/zfs_refcount.h>
#include <sys/dmu_zfetch.h>
#include <sys/zrlock.h>
#include <sys/multilist.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* dnode_hold() flags.
*/
#define DNODE_MUST_BE_ALLOCATED 1
#define DNODE_MUST_BE_FREE 2
#define DNODE_DRY_RUN 4
/*
* dnode_next_offset() flags.
*/
#define DNODE_FIND_HOLE 1
#define DNODE_FIND_BACKWARDS 2
#define DNODE_FIND_HAVELOCK 4
/*
* Fixed constants.
*/
#define DNODE_SHIFT 9 /* 512 bytes */
#define DN_MIN_INDBLKSHIFT 12 /* 4k */
/*
* If we ever increase this value beyond 20, we need to revisit all logic that
* does x << level * ebps to handle overflow. With a 1M indirect block size,
* 4 levels of indirect blocks would not be able to guarantee addressing an
* entire object, so 5 levels will be used, but 5 * (20 - 7) = 65, which
* exceeds the 64 bits available for a byte offset and would overflow.
*/
#define DN_MAX_INDBLKSHIFT 17 /* 128k */
#define DNODE_BLOCK_SHIFT 14 /* 16k */
#define DNODE_CORE_SIZE 64 /* 64 bytes for dnode sans blkptrs */
#define DN_MAX_OBJECT_SHIFT 48 /* 256 trillion (zfs_fid_t limit) */
#define DN_MAX_OFFSET_SHIFT 64 /* 2^64 bytes in a dnode */
/*
* dnode id flags
*
* Note: a file will never ever have its ids moved from bonus->spill
*/
#define DN_ID_CHKED_BONUS 0x1
#define DN_ID_CHKED_SPILL 0x2
#define DN_ID_OLD_EXIST 0x4
#define DN_ID_NEW_EXIST 0x8
/*
* Derived constants.
*/
#define DNODE_MIN_SIZE (1 << DNODE_SHIFT)
#define DNODE_MAX_SIZE (1 << DNODE_BLOCK_SHIFT)
#define DNODE_BLOCK_SIZE (1 << DNODE_BLOCK_SHIFT)
#define DNODE_MIN_SLOTS (DNODE_MIN_SIZE >> DNODE_SHIFT)
#define DNODE_MAX_SLOTS (DNODE_MAX_SIZE >> DNODE_SHIFT)
#define DN_BONUS_SIZE(dnsize) ((dnsize) - DNODE_CORE_SIZE - \
(1 << SPA_BLKPTRSHIFT))
#define DN_SLOTS_TO_BONUSLEN(slots) DN_BONUS_SIZE((slots) << DNODE_SHIFT)
#define DN_OLD_MAX_BONUSLEN (DN_BONUS_SIZE(DNODE_MIN_SIZE))
#define DN_MAX_NBLKPTR ((DNODE_MIN_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT)
#define DN_MAX_OBJECT (1ULL << DN_MAX_OBJECT_SHIFT)
#define DN_ZERO_BONUSLEN (DN_BONUS_SIZE(DNODE_MAX_SIZE) + 1)
#define DN_KILL_SPILLBLK (1)
#define DN_SLOT_UNINIT ((void *)NULL) /* Uninitialized */
#define DN_SLOT_FREE ((void *)1UL) /* Free slot */
#define DN_SLOT_ALLOCATED ((void *)2UL) /* Allocated slot */
#define DN_SLOT_INTERIOR ((void *)3UL) /* Interior allocated slot */
#define DN_SLOT_IS_PTR(dn) ((void *)dn > DN_SLOT_INTERIOR)
#define DN_SLOT_IS_VALID(dn) ((void *)dn != NULL)
#define DNODES_PER_BLOCK_SHIFT (DNODE_BLOCK_SHIFT - DNODE_SHIFT)
#define DNODES_PER_BLOCK (1ULL << DNODES_PER_BLOCK_SHIFT)
/*
* This is inaccurate if the indblkshift of the particular object is not the
* max. But it's only used by userland to calculate the zvol reservation.
*/
#define DNODES_PER_LEVEL_SHIFT (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)
#define DNODES_PER_LEVEL (1ULL << DNODES_PER_LEVEL_SHIFT)
#define DN_MAX_LEVELS (DIV_ROUND_UP(DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT, \
DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT) + 1)
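/*
* Worked example (illustrative only; SPA_MINBLOCKSHIFT == 9 and
* SPA_BLKPTRSHIFT == 7 are taken from sys/spa.h): DN_MAX_LEVELS expands to
* DIV_ROUND_UP(64 - 9, 12 - 7) + 1 = DIV_ROUND_UP(55, 5) + 1 = 12, i.e. a
* dnode never needs more than 12 levels, counting the data blocks as the
* lowest level.
*/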
#define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus + \
(((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))
#define DN_MAX_BONUS_LEN(dnp) \
((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? \
(uint8_t *)DN_SPILL_BLKPTR(dnp) - (uint8_t *)DN_BONUS(dnp) : \
(uint8_t *)(dnp + (dnp->dn_extra_slots + 1)) - (uint8_t *)DN_BONUS(dnp))
#define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
(dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)
#define EPB(blkshift, typeshift) (1 << (blkshift - typeshift))
struct dmu_buf_impl;
struct objset;
struct zio;
enum dnode_dirtycontext {
DN_UNDIRTIED,
DN_DIRTY_OPEN,
DN_DIRTY_SYNC
};
/* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */
#define DNODE_FLAG_USED_BYTES (1 << 0)
#define DNODE_FLAG_USERUSED_ACCOUNTED (1 << 1)
/* Does dnode have a SA spill blkptr in bonus? */
#define DNODE_FLAG_SPILL_BLKPTR (1 << 2)
/* User/Group/Project dnode accounting */
#define DNODE_FLAG_USEROBJUSED_ACCOUNTED (1 << 3)
/*
* This mask defines the set of flags which are "portable", meaning
* that they can be preserved when doing a raw encrypted zfs send.
* Flags included in this mask will be protected by AAD when the block
* of dnodes is encrypted.
*/
#define DNODE_CRYPT_PORTABLE_FLAGS_MASK (DNODE_FLAG_SPILL_BLKPTR)
/*
* VARIABLE-LENGTH (LARGE) DNODES
*
* The motivation for variable-length dnodes is to eliminate the overhead
* associated with using spill blocks. Spill blocks are used to store
* system attribute data (i.e. file metadata) that does not fit in the
* dnode's bonus buffer. By allowing a larger bonus buffer area the use of
* a spill block can be avoided. Spill blocks potentially incur an
* additional read I/O for every dnode in a dnode block. As a worst case
* example, reading 32 dnodes from a 16k dnode block and all of the spill
* blocks could issue 33 separate reads. Now suppose those dnodes have size
* 1024 and therefore don't need spill blocks. Then the worst case number
* of blocks read is reduced from 33 to two--one per dnode block.
*
* ZFS-on-Linux systems that make heavy use of extended attributes benefit
* from this feature. In particular, ZFS-on-Linux supports the xattr=sa
* dataset property which allows file extended attribute data to be stored
* in the dnode bonus buffer as an alternative to the traditional
* directory-based format. Workloads such as SELinux and the Lustre
* distributed filesystem often store enough xattr data to force spill
* blocks when xattr=sa is in effect. Large dnodes may therefore provide a
* performance benefit to such systems. Other use cases that benefit from
* this feature include files with large ACLs and symbolic links with long
* target names.
*
* The size of a dnode may be a multiple of 512 bytes up to the size of a
* dnode block (currently 16384 bytes). The dn_extra_slots field of the
* on-disk dnode_phys_t structure describes the size of the physical dnode
* on disk. The field represents how many "extra" dnode_phys_t slots a
* dnode consumes in its dnode block. This convention results in a value of
* 0 for 512 byte dnodes which preserves on-disk format compatibility with
* older software which doesn't support large dnodes.
*
* Similarly, the in-memory dnode_t structure has a dn_num_slots field
* to represent the total number of dnode_phys_t slots consumed on disk.
* Thus dn->dn_num_slots is 1 greater than the corresponding
* dnp->dn_extra_slots. This difference in convention was adopted
* because, unlike on-disk structures, backward compatibility is not a
* concern for in-memory objects, so we used a more natural way to
* represent size for a dnode_t.
*
* The default size for newly created dnodes is determined by the value of
* the "dnodesize" dataset property. By default the property is set to
* "legacy" which is compatible with older software. Setting the property
* to "auto" will allow the filesystem to choose the most suitable dnode
* size. Currently this just sets the default dnode size to 1k, but future
* code improvements could dynamically choose a size based on observed
* workload patterns. Dnodes of varying sizes can coexist within the same
* dataset and even within the same dnode block.
*/
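/*
* Illustrative sketch (not part of this header's interface): how the slot
* accounting described above works out for a hypothetical 1K dnode, using
* the DN_* macros defined earlier in this file. Assumes SPA_BLKPTRSHIFT == 7
* (128-byte block pointers).
*/
#if 0	/* example only */
static void
dnode_slot_example(void)
{
	int dnodesize = 1024;				/* dnodesize=1k */
	int num_slots = dnodesize >> DNODE_SHIFT;	/* in-memory dn_num_slots == 2 */
	int extra_slots = num_slots - 1;		/* on-disk dn_extra_slots == 1 */
	/* 1024 - 64 (core) - 128 (one blkptr) = 832 bytes of bonus space */
	int max_bonuslen = DN_SLOTS_TO_BONUSLEN(num_slots);
	(void) extra_slots;
	(void) max_bonuslen;
}
#endif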
typedef struct dnode_phys {
uint8_t dn_type; /* dmu_object_type_t */
uint8_t dn_indblkshift; /* ln2(indirect block size) */
uint8_t dn_nlevels; /* 1=dn_blkptr->data blocks */
uint8_t dn_nblkptr; /* length of dn_blkptr */
uint8_t dn_bonustype; /* type of data in bonus buffer */
uint8_t dn_checksum; /* ZIO_CHECKSUM type */
uint8_t dn_compress; /* ZIO_COMPRESS type */
uint8_t dn_flags; /* DNODE_FLAG_* */
uint16_t dn_datablkszsec; /* data block size in 512b sectors */
uint16_t dn_bonuslen; /* length of dn_bonus */
uint8_t dn_extra_slots; /* # of subsequent slots consumed */
uint8_t dn_pad2[3];
/* accounting is protected by dn_dirty_mtx */
uint64_t dn_maxblkid; /* largest allocated block ID */
uint64_t dn_used; /* bytes (or sectors) of disk space */
/*
* Both dn_pad2 and dn_pad3 are protected by the block's MAC. This
* allows us to protect any fields that might be added here in the
* future. In either case, developers will want to check
* zio_crypt_init_uios_dnode() and zio_crypt_do_dnode_hmac_updates()
* to ensure the new field is being protected and updated properly.
*/
uint64_t dn_pad3[4];
/*
* The tail region is 448 bytes for a 512 byte dnode, and
* correspondingly larger for larger dnode sizes. The spill
* block pointer, when present, is always at the end of the tail
* region. There are three ways this space may be used, using
* a 512 byte dnode for this diagram:
*
* 0 64 128 192 256 320 384 448 (offset)
* +---------------+---------------+---------------+-------+
* | dn_blkptr[0] | dn_blkptr[1] | dn_blkptr[2] | / |
* +---------------+---------------+---------------+-------+
* | dn_blkptr[0] | dn_bonus[0..319] |
* +---------------+-----------------------+---------------+
* | dn_blkptr[0] | dn_bonus[0..191] | dn_spill |
* +---------------+-----------------------+---------------+
*/
union {
blkptr_t dn_blkptr[1+DN_OLD_MAX_BONUSLEN/sizeof (blkptr_t)];
struct {
blkptr_t __dn_ignore1;
uint8_t dn_bonus[DN_OLD_MAX_BONUSLEN];
};
struct {
blkptr_t __dn_ignore2;
uint8_t __dn_ignore3[DN_OLD_MAX_BONUSLEN -
sizeof (blkptr_t)];
blkptr_t dn_spill;
};
};
} dnode_phys_t;
#define DN_SPILL_BLKPTR(dnp) ((blkptr_t *)((char *)(dnp) + \
(((dnp)->dn_extra_slots + 1) << DNODE_SHIFT) - (1 << SPA_BLKPTRSHIFT)))
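/*
* Worked example (illustrative only; assumes SPA_BLKPTRSHIFT == 7, i.e.
* 128-byte block pointers): for a 512-byte dnode, DN_SPILL_BLKPTR() points
* (1 << 9) - (1 << 7) = 384 bytes into the dnode, which is offset 320 of
* the 448-byte tail region diagrammed above. The maximum bonus length is
* then 512 - 64 - 128 = 320 bytes without a spill pointer, or
* 320 - 128 = 192 bytes when dn_spill is present, matching the diagram.
*/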
struct dnode {
/*
* Protects the structure of the dnode, including the number of levels
* of indirection (dn_nlevels), dn_maxblkid, and dn_next_*
*/
krwlock_t dn_struct_rwlock;
/* Our link on dn_objset->os_dnodes list; protected by os_lock. */
list_node_t dn_link;
/* immutable: */
struct objset *dn_objset;
uint64_t dn_object;
struct dmu_buf_impl *dn_dbuf;
struct dnode_handle *dn_handle;
dnode_phys_t *dn_phys; /* pointer into dn->dn_dbuf->db.db_data */
/*
* Copies of stuff in dn_phys. They're valid in the open
* context (e.g. even before the dnode is first synced).
* Where necessary, these are protected by dn_struct_rwlock.
*/
dmu_object_type_t dn_type; /* object type */
uint16_t dn_bonuslen; /* bonus length */
uint8_t dn_bonustype; /* bonus type */
uint8_t dn_nblkptr; /* number of blkptrs (immutable) */
uint8_t dn_checksum; /* ZIO_CHECKSUM type */
uint8_t dn_compress; /* ZIO_COMPRESS type */
uint8_t dn_nlevels;
uint8_t dn_indblkshift;
uint8_t dn_datablkshift; /* zero if blksz not power of 2! */
uint8_t dn_moved; /* Has this dnode been moved? */
uint16_t dn_datablkszsec; /* in 512b sectors */
uint32_t dn_datablksz; /* in bytes */
uint64_t dn_maxblkid;
uint8_t dn_next_type[TXG_SIZE];
uint8_t dn_num_slots; /* metadnode slots consumed on disk */
uint8_t dn_next_nblkptr[TXG_SIZE];
uint8_t dn_next_nlevels[TXG_SIZE];
uint8_t dn_next_indblkshift[TXG_SIZE];
uint8_t dn_next_bonustype[TXG_SIZE];
uint8_t dn_rm_spillblk[TXG_SIZE]; /* for removing spill blk */
uint16_t dn_next_bonuslen[TXG_SIZE];
uint32_t dn_next_blksz[TXG_SIZE]; /* next block size in bytes */
uint64_t dn_next_maxblkid[TXG_SIZE]; /* next maxblkid in bytes */
/* protected by dn_dbufs_mtx; declared here to fill 32-bit hole */
uint32_t dn_dbufs_count; /* count of dn_dbufs */
/* protected by os_lock: */
multilist_node_t dn_dirty_link[TXG_SIZE]; /* next on dataset's dirty */
/* protected by dn_mtx: */
kmutex_t dn_mtx;
list_t dn_dirty_records[TXG_SIZE];
struct range_tree *dn_free_ranges[TXG_SIZE];
uint64_t dn_allocated_txg;
uint64_t dn_free_txg;
uint64_t dn_assigned_txg;
uint64_t dn_dirty_txg; /* txg dnode was last dirtied */
kcondvar_t dn_notxholds;
kcondvar_t dn_nodnholds;
enum dnode_dirtycontext dn_dirtyctx;
void *dn_dirtyctx_firstset; /* dbg: contents meaningless */
/* protected by their own internal locking */
zfs_refcount_t dn_tx_holds;
zfs_refcount_t dn_holds;
kmutex_t dn_dbufs_mtx;
/*
* Descendent dbufs, ordered by dbuf_compare. Note that dn_dbufs
* can contain multiple dbufs of the same (level, blkid) when a
* dbuf is marked DB_EVICTING without being removed from
* dn_dbufs. To maintain the avl invariant that there cannot be
* duplicate entries, we order the dbufs by an arbitrary value -
* their address in memory. This means that dn_dbufs cannot be used to
* directly look up a dbuf. Instead, callers must use avl_walk, have
* a reference to the dbuf, or look up a non-existent node with
* db_state = DB_SEARCH (see dbuf_free_range for an example, and the
* illustrative sketch following this struct).
*/
avl_tree_t dn_dbufs;
/* protected by dn_struct_rwlock */
struct dmu_buf_impl *dn_bonus; /* bonus buffer dbuf */
boolean_t dn_have_spill; /* have spill or are spilling */
/* parent IO for current sync write */
zio_t *dn_zio;
/* used in syncing context */
uint64_t dn_oldused; /* old phys used bytes */
uint64_t dn_oldflags; /* old phys dn_flags */
uint64_t dn_olduid, dn_oldgid, dn_oldprojid;
uint64_t dn_newuid, dn_newgid, dn_newprojid;
int dn_id_flags;
/* holds prefetch structure */
struct zfetch dn_zfetch;
};
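/*
* Rough sketch of the dn_dbufs lookup pattern described above (the real
* code is in dbuf_free_range(); 'dn' and 'blkid' are hypothetical locals).
* A DB_SEARCH key never compares equal to a real dbuf, so avl_find() only
* yields an insertion point and avl_nearest() locates the first dbuf at or
* after (level 0, blkid).
*/
#if 0	/* example only */
	dmu_buf_impl_t db_search, *db;
	avl_index_t where;

	db_search.db_level = 0;
	db_search.db_blkid = blkid;
	db_search.db_state = DB_SEARCH;

	mutex_enter(&dn->dn_dbufs_mtx);
	(void) avl_find(&dn->dn_dbufs, &db_search, &where);
	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
	mutex_exit(&dn->dn_dbufs_mtx);
#endif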
/*
* Since the AVL tree already has an embedded element counter, use
* dn_dbufs_count only for dbufs not counted there (bonus buffers) and
* add the two counts together.
*/
#define DN_DBUFS_COUNT(dn) ((dn)->dn_dbufs_count + \
avl_numnodes(&(dn)->dn_dbufs))
/*
* We use this (otherwise unused) bit to indicate if the value of
* dn_next_maxblkid[txgoff] is valid to use in dnode_sync().
*/
#define DMU_NEXT_MAXBLKID_SET (1ULL << 63)
/*
* Adds a level of indirection between the dbuf and the dnode to avoid
* iterating descendent dbufs in dnode_move(). Handles are not allocated
* individually, but as an array of child dnodes in dnode_hold_impl().
*/
typedef struct dnode_handle {
/* Protects dnh_dnode from modification by dnode_move(). */
zrlock_t dnh_zrlock;
dnode_t *dnh_dnode;
} dnode_handle_t;
typedef struct dnode_children {
dmu_buf_user_t dnc_dbu; /* User evict data */
size_t dnc_count; /* number of children */
dnode_handle_t dnc_children[]; /* sized dynamically */
} dnode_children_t;
typedef struct free_range {
avl_node_t fr_node;
uint64_t fr_blkid;
uint64_t fr_nblks;
} free_range_t;
void dnode_special_open(struct objset *dd, dnode_phys_t *dnp,
uint64_t object, dnode_handle_t *dnh);
void dnode_special_close(dnode_handle_t *dnh);
void dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx);
void dnode_setbonus_type(dnode_t *dn, dmu_object_type_t, dmu_tx_t *tx);
void dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx);
int dnode_hold(struct objset *dd, uint64_t object,
void *ref, dnode_t **dnp);
int dnode_hold_impl(struct objset *dd, uint64_t object, int flag, int dn_slots,
void *ref, dnode_t **dnp);
boolean_t dnode_add_ref(dnode_t *dn, void *ref);
void dnode_rele(dnode_t *dn, void *ref);
void dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting);
int dnode_try_claim(objset_t *os, uint64_t object, int slots);
void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
void dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, void *tag);
void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx);
void dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
dmu_object_type_t bonustype, int bonuslen, int dn_slots,
boolean_t keep_spill, dmu_tx_t *tx);
void dnode_free(dnode_t *dn, dmu_tx_t *tx);
void dnode_byteswap(dnode_phys_t *dnp);
void dnode_buf_byteswap(void *buf, size_t size);
void dnode_verify(dnode_t *dn);
int dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx);
int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx);
void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx);
void dnode_diduse_space(dnode_t *dn, int64_t space);
void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx,
boolean_t have_read, boolean_t force);
uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid);
void dnode_init(void);
void dnode_fini(void);
int dnode_next_offset(dnode_t *dn, int flags, uint64_t *off,
int minlvl, uint64_t blkfill, uint64_t txg);
void dnode_evict_dbufs(dnode_t *dn);
void dnode_evict_bonus(dnode_t *dn);
void dnode_free_interior_slots(dnode_t *dn);
#define DNODE_IS_DIRTY(_dn) \
((_dn)->dn_dirty_txg >= spa_syncing_txg((_dn)->dn_objset->os_spa))
#define DNODE_IS_CACHEABLE(_dn) \
((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL || \
(DMU_OT_IS_METADATA((_dn)->dn_type) && \
(_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA))
#define DNODE_META_IS_CACHEABLE(_dn) \
((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL || \
(_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA)
/*
* Used for dnodestats kstat.
*/
typedef struct dnode_stats {
/*
* Number of failed attempts to hold a meta dnode dbuf.
*/
kstat_named_t dnode_hold_dbuf_hold;
/*
* Number of failed attempts to read a meta dnode dbuf.
*/
kstat_named_t dnode_hold_dbuf_read;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was able
* to hold the requested object number which was allocated. This is
* the common case when looking up any allocated object number.
*/
kstat_named_t dnode_hold_alloc_hits;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was not
* able to hold the requested object number because it was not allocated.
*/
kstat_named_t dnode_hold_alloc_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was not
* able to hold the requested object number because the object number
* refers to an interior large dnode slot.
*/
kstat_named_t dnode_hold_alloc_interior;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) needed
* to retry acquiring slot zrl locks due to contention.
*/
kstat_named_t dnode_hold_alloc_lock_retry;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) did not
* need to create the dnode because another thread did so after
* dropping the read lock but before acquiring the write lock.
*/
kstat_named_t dnode_hold_alloc_lock_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) found
* a free dnode instantiated by dnode_create() but not yet allocated
* by dnode_allocate().
*/
kstat_named_t dnode_hold_alloc_type_none;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was able
* to hold the requested range of free dnode slots.
*/
kstat_named_t dnode_hold_free_hits;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was not
* able to hold the requested range of free dnode slots because
* at least one slot was allocated.
*/
kstat_named_t dnode_hold_free_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was not
* able to hold the requested range of free dnode slots because
* after acquiring the zrl lock at least one slot was allocated.
*/
kstat_named_t dnode_hold_free_lock_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) needed
* to retry acquiring slot zrl locks due to contention.
*/
kstat_named_t dnode_hold_free_lock_retry;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) requested
* a range of dnode slots which were held by another thread.
*/
kstat_named_t dnode_hold_free_refcount;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) requested
* a range of dnode slots which would overflow the dnode_phys_t.
*/
kstat_named_t dnode_hold_free_overflow;
/*
* Number of times dnode_free_interior_slots() needed to retry
* acquiring a slot zrl lock due to contention.
*/
kstat_named_t dnode_free_interior_lock_retry;
/*
* Number of new dnodes allocated by dnode_allocate().
*/
kstat_named_t dnode_allocate;
/*
* Number of dnodes re-allocated by dnode_reallocate().
*/
kstat_named_t dnode_reallocate;
/*
* Number of meta dnode dbufs evicted.
*/
kstat_named_t dnode_buf_evict;
/*
* Number of times dmu_object_alloc*() reached the end of the existing
* object ID chunk and advanced to a new one.
*/
kstat_named_t dnode_alloc_next_chunk;
/*
* Number of times multiple threads attempted to allocate a dnode
* from the same block of free dnodes.
*/
kstat_named_t dnode_alloc_race;
/*
* Number of times dmu_object_alloc*() was forced to advance to the
* next meta dnode dbuf due to an error from dmu_object_next().
*/
kstat_named_t dnode_alloc_next_block;
/*
* Statistics for tracking dnodes which have been moved.
*/
kstat_named_t dnode_move_invalid;
kstat_named_t dnode_move_recheck1;
kstat_named_t dnode_move_recheck2;
kstat_named_t dnode_move_special;
kstat_named_t dnode_move_handle;
kstat_named_t dnode_move_rwlock;
kstat_named_t dnode_move_active;
} dnode_stats_t;
extern dnode_stats_t dnode_stats;
#define DNODE_STAT_INCR(stat, val) \
atomic_add_64(&dnode_stats.stat.value.ui64, (val));
#define DNODE_STAT_BUMP(stat) \
DNODE_STAT_INCR(stat, 1);
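/*
* Usage sketch (illustrative; 'refs' is a hypothetical local variable):
*
*	DNODE_STAT_BUMP(dnode_hold_alloc_hits);
*	DNODE_STAT_INCR(dnode_hold_free_refcount, refs);
*
* Note that the macro bodies above already end in a semicolon, so the
* trailing ';' at a call site expands to an extra empty statement.
*/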
#ifdef ZFS_DEBUG
#define dprintf_dnode(dn, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char __db_buf[32]; \
uint64_t __db_obj = (dn)->dn_object; \
if (__db_obj == DMU_META_DNODE_OBJECT) \
(void) strlcpy(__db_buf, "mdn", sizeof (__db_buf)); \
else \
(void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
(u_longlong_t)__db_obj);\
dprintf_ds((dn)->dn_objset->os_dsl_dataset, "obj=%s " fmt, \
__db_buf, __VA_ARGS__); \
} \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define DNODE_VERIFY(dn) dnode_verify(dn)
#define FREE_VERIFY(db, start, end, tx) free_verify(db, start, end, tx)
#else
#define dprintf_dnode(db, fmt, ...)
#define DNODE_VERIFY(dn)
#define FREE_VERIFY(db, start, end, tx)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DNODE_H */
diff --git a/sys/contrib/openzfs/include/sys/dsl_dataset.h b/sys/contrib/openzfs/include/sys/dsl_dataset.h
index ed934f969e92..3c9199b861c4 100644
--- a/sys/contrib/openzfs/include/sys/dsl_dataset.h
+++ b/sys/contrib/openzfs/include/sys/dsl_dataset.h
@@ -1,509 +1,509 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DSL_DATASET_H
#define _SYS_DSL_DATASET_H
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/bplist.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_context.h>
#include <sys/dsl_deadlist.h>
#include <sys/zfs_refcount.h>
#include <sys/rrwlock.h>
#include <sys/dsl_crypt.h>
#include <zfeature_common.h>
#ifdef __cplusplus
extern "C" {
#endif
extern int zfs_allow_redacted_dataset_mount;
struct dsl_dataset;
struct dsl_dir;
struct dsl_pool;
struct dsl_crypto_params;
struct dsl_key_mapping;
struct zfs_bookmark_phys;
#define DS_FLAG_INCONSISTENT (1ULL<<0)
#define DS_IS_INCONSISTENT(ds) \
(dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT)
/*
* Do not allow this dataset to be promoted.
*/
#define DS_FLAG_NOPROMOTE (1ULL<<1)
/*
* DS_FLAG_UNIQUE_ACCURATE is set if ds_unique_bytes has been correctly
* calculated for head datasets (starting with SPA_VERSION_UNIQUE_ACCURATE,
* refquota/refreservations).
*/
#define DS_FLAG_UNIQUE_ACCURATE (1ULL<<2)
/*
* DS_FLAG_DEFER_DESTROY is set after 'zfs destroy -d' has been called
* on a dataset. This allows the dataset to be destroyed using 'zfs release'.
*/
#define DS_FLAG_DEFER_DESTROY (1ULL<<3)
#define DS_IS_DEFER_DESTROY(ds) \
(dsl_dataset_phys(ds)->ds_flags & DS_FLAG_DEFER_DESTROY)
/*
* DS_FIELD_* are strings that are used in the "extensified" dataset zap object.
* They should be of the format <reverse-dns>:<field>.
*/
/*
* This field's value is the object ID of a zap object which contains the
* bookmarks of this dataset. If it is present, then this dataset is counted
* in the refcount of the SPA_FEATURE_BOOKMARKS feature.
*/
#define DS_FIELD_BOOKMARK_NAMES "com.delphix:bookmarks"
/*
* This field is present (with value=0) if this dataset may contain large
* dnodes (>512B). If it is present, then this dataset is counted in the
* refcount of the SPA_FEATURE_LARGE_DNODE feature.
*/
#define DS_FIELD_LARGE_DNODE "org.zfsonlinux:large_dnode"
/*
* These fields are set on datasets that are in the middle of a resumable
* receive, and allow the sender to resume the send if it is interrupted.
*/
#define DS_FIELD_RESUME_FROMGUID "com.delphix:resume_fromguid"
#define DS_FIELD_RESUME_TONAME "com.delphix:resume_toname"
#define DS_FIELD_RESUME_TOGUID "com.delphix:resume_toguid"
#define DS_FIELD_RESUME_OBJECT "com.delphix:resume_object"
#define DS_FIELD_RESUME_OFFSET "com.delphix:resume_offset"
#define DS_FIELD_RESUME_BYTES "com.delphix:resume_bytes"
#define DS_FIELD_RESUME_LARGEBLOCK "com.delphix:resume_largeblockok"
#define DS_FIELD_RESUME_EMBEDOK "com.delphix:resume_embedok"
#define DS_FIELD_RESUME_COMPRESSOK "com.delphix:resume_compressok"
#define DS_FIELD_RESUME_RAWOK "com.datto:resume_rawok"
/*
* This field is set to the object number of the remap deadlist if one exists.
*/
#define DS_FIELD_REMAP_DEADLIST "com.delphix:remap_deadlist"
/*
* We were receiving an incremental from a redaction bookmark, and these are the
* guids of its snapshots.
*/
#define DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS \
"com.delphix:resume_redact_book_snaps"
/*
* This field is set to the ivset guid for encrypted snapshots. This is used
* for validating raw receives.
*/
#define DS_FIELD_IVSET_GUID "com.datto:ivset_guid"
/*
* DS_FLAG_CI_DATASET is set if the dataset contains a file system whose
* name lookups should be performed case-insensitively.
*/
#define DS_FLAG_CI_DATASET (1ULL<<16)
#define DS_CREATE_FLAG_NODIRTY (1ULL<<24)
typedef struct dsl_dataset_phys {
uint64_t ds_dir_obj; /* DMU_OT_DSL_DIR */
uint64_t ds_prev_snap_obj; /* DMU_OT_DSL_DATASET */
uint64_t ds_prev_snap_txg;
uint64_t ds_next_snap_obj; /* DMU_OT_DSL_DATASET */
uint64_t ds_snapnames_zapobj; /* DMU_OT_DSL_DS_SNAP_MAP 0 for snaps */
uint64_t ds_num_children; /* clone/snap children; ==0 for head */
uint64_t ds_creation_time; /* seconds since 1970 */
uint64_t ds_creation_txg;
uint64_t ds_deadlist_obj; /* DMU_OT_DEADLIST */
/*
* ds_referenced_bytes, ds_compressed_bytes, and ds_uncompressed_bytes
* include all blocks referenced by this dataset, including those
* shared with any other datasets.
*/
uint64_t ds_referenced_bytes;
uint64_t ds_compressed_bytes;
uint64_t ds_uncompressed_bytes;
uint64_t ds_unique_bytes; /* only relevant to snapshots */
/*
* The ds_fsid_guid is a 56-bit ID that can change to avoid
* collisions. The ds_guid is a 64-bit ID that will never
* change, so there is a small probability that it will collide.
*/
uint64_t ds_fsid_guid;
uint64_t ds_guid;
uint64_t ds_flags; /* DS_FLAG_* */
blkptr_t ds_bp;
uint64_t ds_next_clones_obj; /* DMU_OT_DSL_CLONES */
uint64_t ds_props_obj; /* DMU_OT_DSL_PROPS for snaps */
uint64_t ds_userrefs_obj; /* DMU_OT_USERREFS */
uint64_t ds_pad[5]; /* pad out to 320 bytes for good measure */
} dsl_dataset_phys_t;
typedef struct dsl_dataset {
dmu_buf_user_t ds_dbu;
rrwlock_t ds_bp_rwlock; /* Protects ds_phys->ds_bp */
/* Immutable: */
struct dsl_dir *ds_dir;
dmu_buf_t *ds_dbuf;
uint64_t ds_object;
uint64_t ds_fsid_guid;
boolean_t ds_is_snapshot;
struct dsl_key_mapping *ds_key_mapping;
/* only used in syncing context, only valid for non-snapshots: */
struct dsl_dataset *ds_prev;
uint64_t ds_bookmarks_obj; /* DMU_OTN_ZAP_METADATA */
avl_tree_t ds_bookmarks; /* dsl_bookmark_node_t */
/* has internal locking: */
dsl_deadlist_t ds_deadlist;
bplist_t ds_pending_deadlist;
/*
* The remap deadlist contains blocks (DVA's, really) that are
* referenced by the previous snapshot and point to indirect vdevs,
* but in this dataset they have been remapped to point to concrete
* (or at least, less-indirect) vdevs. In other words, the
* physical DVA is referenced by the previous snapshot but not by
* this dataset. Logically, the DVA continues to be referenced,
* but we are using a different (less indirect) physical DVA.
* This deadlist is used to determine when physical DVAs that
* point to indirect vdevs are no longer referenced anywhere,
* and thus should be marked obsolete.
*
* This is only used if SPA_FEATURE_OBSOLETE_COUNTS is enabled.
*/
dsl_deadlist_t ds_remap_deadlist;
/* protects creation of the ds_remap_deadlist */
kmutex_t ds_remap_deadlist_lock;
/* protected by lock on pool's dp_dirty_datasets list */
txg_node_t ds_dirty_link;
list_node_t ds_synced_link;
/*
* ds_phys->ds_<accounting> is also protected by ds_lock.
* Protected by ds_lock:
*/
kmutex_t ds_lock;
objset_t *ds_objset;
uint64_t ds_userrefs;
void *ds_owner;
/*
* Long holds prevent the ds from being destroyed; they allow the
* ds to remain held even after dropping the dp_config_rwlock.
* Owning counts as a long hold. See the comments above
* dsl_pool_hold() for details.
*/
zfs_refcount_t ds_longholds;
/* no locking; only for making guesses */
uint64_t ds_trysnap_txg;
/* for objset_open() */
kmutex_t ds_opening_lock;
uint64_t ds_reserved; /* cached refreservation */
uint64_t ds_quota; /* cached refquota */
kmutex_t ds_sendstream_lock;
list_t ds_sendstreams;
/*
* When in the middle of a resumable receive, tracks how much
* progress we have made.
*/
uint64_t ds_resume_object[TXG_SIZE];
uint64_t ds_resume_offset[TXG_SIZE];
uint64_t ds_resume_bytes[TXG_SIZE];
/* Protected by our dsl_dir's dd_lock */
list_t ds_prop_cbs;
/*
* For ZFEATURE_FLAG_PER_DATASET features, set if this dataset
* uses this feature.
*/
void *ds_feature[SPA_FEATURES];
/*
* Set if we need to activate the feature on this dataset this txg
* (used only in syncing context).
*/
void *ds_feature_activation[SPA_FEATURES];
/* Protected by ds_lock; keep at end of struct for better locality */
char ds_snapname[ZFS_MAX_DATASET_NAME_LEN];
} dsl_dataset_t;
static inline dsl_dataset_phys_t *
dsl_dataset_phys(dsl_dataset_t *ds)
{
return ((dsl_dataset_phys_t *)ds->ds_dbuf->db_data);
}
typedef struct dsl_dataset_promote_arg {
const char *ddpa_clonename;
dsl_dataset_t *ddpa_clone;
list_t shared_snaps, origin_snaps, clone_snaps;
dsl_dataset_t *origin_origin; /* origin of the origin */
uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
nvlist_t *err_ds;
cred_t *cr;
proc_t *proc;
} dsl_dataset_promote_arg_t;
typedef struct dsl_dataset_rollback_arg {
const char *ddra_fsname;
const char *ddra_tosnap;
void *ddra_owner;
nvlist_t *ddra_result;
} dsl_dataset_rollback_arg_t;
typedef struct dsl_dataset_snapshot_arg {
nvlist_t *ddsa_snaps;
nvlist_t *ddsa_props;
nvlist_t *ddsa_errors;
cred_t *ddsa_cr;
proc_t *ddsa_proc;
} dsl_dataset_snapshot_arg_t;
/*
* The max length of a temporary tag prefix is the number of hex digits
* required to express UINT64_MAX plus one for the hyphen.
*/
#define MAX_TAG_PREFIX_LEN 17
#define dsl_dataset_is_snapshot(ds) \
(dsl_dataset_phys(ds)->ds_num_children != 0)
#define DS_UNIQUE_IS_ACCURATE(ds) \
((dsl_dataset_phys(ds)->ds_flags & DS_FLAG_UNIQUE_ACCURATE) != 0)
/* flags for holding the dataset */
typedef enum ds_hold_flags {
DS_HOLD_FLAG_NONE = 0 << 0,
DS_HOLD_FLAG_DECRYPT = 1 << 0 /* needs access to encrypted data */
} ds_hold_flags_t;
int dsl_dataset_hold(struct dsl_pool *dp, const char *name, void *tag,
dsl_dataset_t **dsp);
int dsl_dataset_hold_flags(struct dsl_pool *dp, const char *name,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp);
boolean_t dsl_dataset_try_add_ref(struct dsl_pool *dp, dsl_dataset_t *ds,
void *tag);
int dsl_dataset_create_key_mapping(dsl_dataset_t *ds);
int dsl_dataset_hold_obj_flags(struct dsl_pool *dp, uint64_t dsobj,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **);
void dsl_dataset_remove_key_mapping(dsl_dataset_t *ds);
int dsl_dataset_hold_obj(struct dsl_pool *dp, uint64_t dsobj,
void *tag, dsl_dataset_t **);
void dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags,
void *tag);
void dsl_dataset_rele(dsl_dataset_t *ds, void *tag);
int dsl_dataset_own(struct dsl_pool *dp, const char *name,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp);
int dsl_dataset_own_force(struct dsl_pool *dp, const char *name,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp);
int dsl_dataset_own_obj(struct dsl_pool *dp, uint64_t dsobj,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp);
int dsl_dataset_own_obj_force(struct dsl_pool *dp, uint64_t dsobj,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp);
void dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag);
void dsl_dataset_name(dsl_dataset_t *ds, char *name);
boolean_t dsl_dataset_tryown(dsl_dataset_t *ds, void *tag, boolean_t override);
int dsl_dataset_namelen(dsl_dataset_t *ds);
boolean_t dsl_dataset_has_owner(dsl_dataset_t *ds);
uint64_t dsl_dataset_create_sync(dsl_dir_t *pds, const char *lastname,
dsl_dataset_t *origin, uint64_t flags, cred_t *,
struct dsl_crypto_params *, dmu_tx_t *);
uint64_t dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
struct dsl_crypto_params *dcp, uint64_t flags, dmu_tx_t *tx);
void dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx);
int dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx);
int dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors);
void dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx);
int dsl_dataset_promote_check(void *arg, dmu_tx_t *tx);
int dsl_dataset_promote(const char *name, char *conflsnap);
int dsl_dataset_rename_snapshot(const char *fsname,
const char *oldsnapname, const char *newsnapname, boolean_t recursive);
int dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
minor_t cleanup_minor, const char *htag);
blkptr_t *dsl_dataset_get_blkptr(dsl_dataset_t *ds);
spa_t *dsl_dataset_get_spa(dsl_dataset_t *ds);
boolean_t dsl_dataset_modified_since_snap(dsl_dataset_t *ds,
dsl_dataset_t *snap);
void dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx);
void dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx);
void dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp,
dmu_tx_t *tx);
int dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp,
dmu_tx_t *tx, boolean_t async);
void dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev,
uint64_t offset, uint64_t size, uint64_t birth, dmu_tx_t *tx);
int dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name,
uint64_t *value);
void dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx);
int get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val);
char *get_receive_resume_stats_impl(dsl_dataset_t *ds);
char *get_child_receive_stats(dsl_dataset_t *ds);
uint64_t dsl_get_refratio(dsl_dataset_t *ds);
uint64_t dsl_get_logicalreferenced(dsl_dataset_t *ds);
uint64_t dsl_get_compressratio(dsl_dataset_t *ds);
uint64_t dsl_get_used(dsl_dataset_t *ds);
uint64_t dsl_get_creation(dsl_dataset_t *ds);
uint64_t dsl_get_creationtxg(dsl_dataset_t *ds);
uint64_t dsl_get_refquota(dsl_dataset_t *ds);
uint64_t dsl_get_refreservation(dsl_dataset_t *ds);
uint64_t dsl_get_guid(dsl_dataset_t *ds);
uint64_t dsl_get_unique(dsl_dataset_t *ds);
uint64_t dsl_get_objsetid(dsl_dataset_t *ds);
uint64_t dsl_get_userrefs(dsl_dataset_t *ds);
uint64_t dsl_get_defer_destroy(dsl_dataset_t *ds);
uint64_t dsl_get_referenced(dsl_dataset_t *ds);
uint64_t dsl_get_numclones(dsl_dataset_t *ds);
uint64_t dsl_get_inconsistent(dsl_dataset_t *ds);
uint64_t dsl_get_redacted(dsl_dataset_t *ds);
uint64_t dsl_get_available(dsl_dataset_t *ds);
int dsl_get_written(dsl_dataset_t *ds, uint64_t *written);
int dsl_get_prev_snap(dsl_dataset_t *ds, char *snap);
void dsl_get_redact_snaps(dsl_dataset_t *ds, nvlist_t *propval);
int dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
char *source);
void get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv);
void dsl_dataset_stats(dsl_dataset_t *os, nvlist_t *nv);
void dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat);
void dsl_dataset_space(dsl_dataset_t *ds,
uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp);
uint64_t dsl_dataset_fsid_guid(dsl_dataset_t *ds);
int dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *newds,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
int dsl_dataset_space_written_bookmark(struct zfs_bookmark_phys *bmp,
dsl_dataset_t *newds, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
int dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap, dsl_dataset_t *last,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
int dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf);
int dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
uint64_t asize, uint64_t inflight, uint64_t *used,
uint64_t *ref_rsrv);
int dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
uint64_t quota);
int dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
uint64_t reservation);
int dsl_dataset_set_compression(const char *dsname, zprop_source_t source,
uint64_t compression);
boolean_t dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
uint64_t earlier_txg);
void dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag);
void dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag);
boolean_t dsl_dataset_long_held(dsl_dataset_t *ds);
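/*
* Hedged usage sketch of the hold/long-hold discipline described in the
* dsl_dataset_t comment above ('dp' and the dataset name are placeholders;
* error handling is elided).
*/
#if 0	/* example only */
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, "pool/fs", FTAG, &ds));
	dsl_dataset_long_hold(ds, FTAG);	/* survives dropping dp_config_rwlock */
	/* ... work that may drop and reacquire the pool config lock ... */
	dsl_dataset_long_rele(ds, FTAG);
	dsl_dataset_rele(ds, FTAG);
#endif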
int dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx);
void dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, dmu_tx_t *tx);
int dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr, proc_t *proc);
void dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx);
void dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx);
void dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds);
int dsl_dataset_get_snapname(dsl_dataset_t *ds);
int dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name,
uint64_t *value);
int dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
boolean_t adj_cnt);
void dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
zprop_source_t source, uint64_t value, dmu_tx_t *tx);
void dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx);
boolean_t dsl_dataset_is_zapified(dsl_dataset_t *ds);
boolean_t dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds);
int dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx);
void dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx);
int dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
nvlist_t *result);
uint64_t dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds);
void dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx);
boolean_t dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds);
void dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx);
void dsl_dataset_activate_feature(uint64_t dsobj, spa_feature_t f, void *arg,
dmu_tx_t *tx);
void dsl_dataset_deactivate_feature(dsl_dataset_t *ds, spa_feature_t f,
dmu_tx_t *tx);
boolean_t dsl_dataset_feature_is_active(dsl_dataset_t *ds, spa_feature_t f);
boolean_t dsl_dataset_get_uint64_array_feature(dsl_dataset_t *ds,
spa_feature_t f, uint64_t *outlength, uint64_t **outp);
void dsl_dataset_activate_redaction(dsl_dataset_t *ds, uint64_t *redact_snaps,
uint64_t num_redact_snaps, dmu_tx_t *tx);
#ifdef ZFS_DEBUG
#define dprintf_ds(ds, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__ds_name = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); \
dsl_dataset_name(ds, __ds_name); \
dprintf("ds=%s " fmt, __ds_name, __VA_ARGS__); \
kmem_free(__ds_name, ZFS_MAX_DATASET_NAME_LEN); \
} \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#else
#define dprintf_ds(dd, fmt, ...)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_DATASET_H */
diff --git a/sys/contrib/openzfs/include/sys/dsl_dir.h b/sys/contrib/openzfs/include/sys/dsl_dir.h
index d635b3140423..993e44354475 100644
--- a/sys/contrib/openzfs/include/sys/dsl_dir.h
+++ b/sys/contrib/openzfs/include/sys/dsl_dir.h
@@ -1,230 +1,230 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DSL_DIR_H
#define _SYS_DSL_DIR_H
#include <sys/dmu.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_refcount.h>
#include <sys/zfs_context.h>
#include <sys/dsl_crypt.h>
#include <sys/bplist.h>
#ifdef __cplusplus
extern "C" {
#endif
struct dsl_dataset;
struct zthr;
/*
* DD_FIELD_* are strings that are used in the "extensified" dsl_dir zap object.
* They should be of the format <reverse-dns>:<field>.
*/
#define DD_FIELD_FILESYSTEM_COUNT "com.joyent:filesystem_count"
#define DD_FIELD_SNAPSHOT_COUNT "com.joyent:snapshot_count"
#define DD_FIELD_CRYPTO_KEY_OBJ "com.datto:crypto_key_obj"
#define DD_FIELD_LIVELIST "com.delphix:livelist"
typedef enum dd_used {
DD_USED_HEAD,
DD_USED_SNAP,
DD_USED_CHILD,
DD_USED_CHILD_RSRV,
DD_USED_REFRSRV,
DD_USED_NUM
} dd_used_t;
#define DD_FLAG_USED_BREAKDOWN (1<<0)
typedef struct dsl_dir_phys {
uint64_t dd_creation_time; /* not actually used */
uint64_t dd_head_dataset_obj;
uint64_t dd_parent_obj;
uint64_t dd_origin_obj;
uint64_t dd_child_dir_zapobj;
/*
* how much space our children are accounting for; for leaf
* datasets, == physical space used by fs + snaps
*/
uint64_t dd_used_bytes;
uint64_t dd_compressed_bytes;
uint64_t dd_uncompressed_bytes;
/* Administrative quota setting */
uint64_t dd_quota;
/* Administrative reservation setting */
uint64_t dd_reserved;
uint64_t dd_props_zapobj;
uint64_t dd_deleg_zapobj; /* dataset delegation permissions */
uint64_t dd_flags;
uint64_t dd_used_breakdown[DD_USED_NUM];
uint64_t dd_clones; /* dsl_dir objects */
uint64_t dd_pad[13]; /* pad out to 256 bytes for good measure */
} dsl_dir_phys_t;
struct dsl_dir {
dmu_buf_user_t dd_dbu;
/* These are immutable; no lock needed: */
uint64_t dd_object;
uint64_t dd_crypto_obj;
dsl_pool_t *dd_pool;
/* Stable until user eviction; no lock needed: */
dmu_buf_t *dd_dbuf;
/* protected by lock on pool's dp_dirty_dirs list */
txg_node_t dd_dirty_link;
/* protected by dp_config_rwlock */
dsl_dir_t *dd_parent;
/* Protected by dd_lock */
kmutex_t dd_lock;
list_t dd_props; /* list of dsl_prop_record_t's */
inode_timespec_t dd_snap_cmtime; /* last snapshot namespace change */
uint64_t dd_origin_txg;
/* gross estimate of space used by in-flight tx's */
uint64_t dd_tempreserved[TXG_SIZE];
/* amount of space we expect to write; == amount of dirty data */
int64_t dd_space_towrite[TXG_SIZE];
dsl_deadlist_t dd_livelist;
bplist_t dd_pending_frees;
bplist_t dd_pending_allocs;
kmutex_t dd_activity_lock;
kcondvar_t dd_activity_cv;
boolean_t dd_activity_cancelled;
uint64_t dd_activity_waiters;
/* protected by dd_lock; keep at end of struct for better locality */
char dd_myname[ZFS_MAX_DATASET_NAME_LEN];
};
static inline dsl_dir_phys_t *
dsl_dir_phys(dsl_dir_t *dd)
{
return (dd->dd_dbuf->db_data);
}
void dsl_dir_rele(dsl_dir_t *dd, void *tag);
void dsl_dir_async_rele(dsl_dir_t *dd, void *tag);
int dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
dsl_dir_t **, const char **tail);
int dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
const char *tail, void *tag, dsl_dir_t **);
void dsl_dir_name(dsl_dir_t *dd, char *buf);
int dsl_dir_namelen(dsl_dir_t *dd);
uint64_t dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds,
const char *name, dmu_tx_t *tx);
uint64_t dsl_dir_get_used(dsl_dir_t *dd);
uint64_t dsl_dir_get_compressed(dsl_dir_t *dd);
uint64_t dsl_dir_get_quota(dsl_dir_t *dd);
uint64_t dsl_dir_get_reservation(dsl_dir_t *dd);
uint64_t dsl_dir_get_compressratio(dsl_dir_t *dd);
uint64_t dsl_dir_get_logicalused(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedsnap(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedds(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedrefreserv(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedchild(dsl_dir_t *dd);
void dsl_dir_get_origin(dsl_dir_t *dd, char *buf);
int dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count);
int dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count);
void dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv);
uint64_t dsl_dir_space_available(dsl_dir_t *dd,
dsl_dir_t *ancestor, int64_t delta, int ondiskonly);
void dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx);
void dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx);
int dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t mem,
uint64_t asize, boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx);
void dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx);
void dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx);
void dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx);
void dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx);
void dsl_dir_diduse_transfer_space(dsl_dir_t *dd, int64_t used,
int64_t compressed, int64_t uncompressed, int64_t tonew,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx);
int dsl_dir_set_quota(const char *ddname, zprop_source_t source,
uint64_t quota);
int dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
uint64_t reservation);
int dsl_dir_activate_fs_ss_limit(const char *);
int dsl_fs_ss_limit_check(dsl_dir_t *, uint64_t, zfs_prop_t, dsl_dir_t *,
cred_t *, proc_t *);
void dsl_fs_ss_count_adjust(dsl_dir_t *, int64_t, const char *, dmu_tx_t *);
int dsl_dir_rename(const char *oldname, const char *newname);
int dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, cred_t *, proc_t *);
boolean_t dsl_dir_is_clone(dsl_dir_t *dd);
void dsl_dir_new_refreservation(dsl_dir_t *dd, struct dsl_dataset *ds,
uint64_t reservation, cred_t *cr, dmu_tx_t *tx);
void dsl_dir_snap_cmtime_update(dsl_dir_t *dd);
inode_timespec_t dsl_dir_snap_cmtime(dsl_dir_t *dd);
void dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value,
dmu_tx_t *tx);
void dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx);
boolean_t dsl_dir_is_zapified(dsl_dir_t *dd);
void dsl_dir_livelist_open(dsl_dir_t *dd, uint64_t obj);
void dsl_dir_livelist_close(dsl_dir_t *dd);
void dsl_dir_remove_livelist(dsl_dir_t *dd, dmu_tx_t *tx, boolean_t total);
int dsl_dir_wait(dsl_dir_t *dd, dsl_dataset_t *ds, zfs_wait_activity_t activity,
boolean_t *waited);
void dsl_dir_cancel_waiters(dsl_dir_t *dd);
/* internal reserved dir name */
#define MOS_DIR_NAME "$MOS"
#define ORIGIN_DIR_NAME "$ORIGIN"
#define FREE_DIR_NAME "$FREE"
#define LEAK_DIR_NAME "$LEAK"
#ifdef ZFS_DEBUG
#define dprintf_dd(dd, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__ds_name = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); \
dsl_dir_name(dd, __ds_name); \
dprintf("dd=%s " fmt, __ds_name, __VA_ARGS__); \
kmem_free(__ds_name, ZFS_MAX_DATASET_NAME_LEN); \
} \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#else
#define dprintf_dd(dd, fmt, ...)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_DIR_H */
diff --git a/sys/contrib/openzfs/include/sys/note.h b/sys/contrib/openzfs/include/sys/note.h
deleted file mode 100644
index 33b5476686ea..000000000000
--- a/sys/contrib/openzfs/include/sys/note.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright (c) 1994 by Sun Microsystems, Inc.
- */
-
-/*
- * sys/note.h: interface for annotating source with info for tools
- *
- * This is the underlying interface; NOTE (/usr/include/note.h) is the
- * preferred interface, but all exported header files should include this
- * file directly and use _NOTE so as not to take "NOTE" from the user's
- * namespace. For consistency, *all* kernel source should use _NOTE.
- *
- * By default, annotations expand to nothing. This file implements
- * that. Tools using annotations will interpose a different version
- * of this file that will expand annotations as needed.
- */
-
-#ifndef _SYS_NOTE_H
-#define _SYS_NOTE_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef _NOTE
-#define _NOTE(s)
-#endif
-
-#define NOTE(s) _NOTE(s)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SYS_NOTE_H */
diff --git a/sys/contrib/openzfs/include/sys/spa.h b/sys/contrib/openzfs/include/sys/spa.h
index 08eba250d3a3..f811d6f5a743 100644
--- a/sys/contrib/openzfs/include/sys/spa.h
+++ b/sys/contrib/openzfs/include/sys/spa.h
@@ -1,1209 +1,1211 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019, Klara Inc.
*/
#ifndef _SYS_SPA_H
#define _SYS_SPA_H
#include <sys/avl.h>
#include <sys/zfs_context.h>
#include <sys/kstat.h>
#include <sys/nvpair.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/spa_checksum.h>
#include <sys/dmu.h>
#include <sys/space_map.h>
#include <sys/bitops.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Forward references that lots of things need.
*/
typedef struct spa spa_t;
typedef struct vdev vdev_t;
typedef struct metaslab metaslab_t;
typedef struct metaslab_group metaslab_group_t;
typedef struct metaslab_class metaslab_class_t;
typedef struct zio zio_t;
typedef struct zilog zilog_t;
typedef struct spa_aux_vdev spa_aux_vdev_t;
typedef struct ddt ddt_t;
typedef struct ddt_entry ddt_entry_t;
typedef struct zbookmark_phys zbookmark_phys_t;
struct bpobj;
struct bplist;
struct dsl_pool;
struct dsl_dataset;
struct dsl_crypto_params;
/*
* Alignment Shift (ashift) is an immutable, internal top-level vdev property
* which can only be set at vdev creation time. Physical writes are always done
* according to it, which makes 2^ashift the smallest possible IO on a vdev.
*
* We currently allow values ranging from 512 bytes (2^9 = 512) to 64 KiB
* (2^16 = 65,536).
*/
#define ASHIFT_MIN 9
#define ASHIFT_MAX 16
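/*
 * Illustrative note (not part of the original header): the smallest I/O a
 * vdev will issue is simply 1ULL << ashift, e.g.
 *
 *	ashift = 9	->	1ULL << 9  =	512 bytes (legacy sectors)
 *	ashift = 12	->	1ULL << 12 =   4096 bytes (4 KiB "AF" disks)
 *	ashift = 16	->	1ULL << 16 =  65536 bytes (current maximum)
 */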
/*
* Size of block to hold the configuration data (a packed nvlist)
*/
#define SPA_CONFIG_BLOCKSIZE (1ULL << 14)
/*
* The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
* The ASIZE encoding should be at least 64 times larger (6 more bits)
* to support up to 4-way RAID-Z mirror mode with worst-case gang block
* overhead, three DVAs per bp, plus one more bit in case we do anything
* else that expands the ASIZE.
*/
#define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */
#define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */
#define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */
#define SPA_COMPRESSBITS 7
#define SPA_VDEVBITS 24
#define SPA_COMPRESSMASK ((1U << SPA_COMPRESSBITS) - 1)
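/*
 * Worked arithmetic (illustrative, not part of the original header),
 * assuming SPA_MINBLOCKSHIFT == 9, i.e. 512-byte units:
 *
 *	LSIZE/PSIZE:	up to 2^16 units * 512 bytes = 32 MiB
 *	ASIZE:		up to 2^24 units * 512 bytes ~  8 GiB per DVA
 */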
/*
* All SPA data is represented by 128-bit data virtual addresses (DVAs).
* The members of the dva_t should be considered opaque outside the SPA.
*/
typedef struct dva {
uint64_t dva_word[2];
} dva_t;
/*
* Some checksums/hashes need a 256-bit initialization salt. This salt is kept
* secret and is suitable for use in MAC algorithms as the key.
*/
typedef struct zio_cksum_salt {
uint8_t zcs_bytes[32];
} zio_cksum_salt_t;
/*
* Each block is described by its DVAs, time of birth, checksum, etc.
* The word-by-word, bit-by-bit layout of the blkptr is as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | pad | vdev1 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 1 |G| offset1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 2 | pad | vdev2 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 3 |G| offset2 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 4 | pad | vdev3 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 5 |G| offset3 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 8 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 9 | physical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | fill count |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* c | checksum[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* d | checksum[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* e | checksum[2] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* f | checksum[3] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* vdev virtual device ID
* offset offset into virtual device
* LSIZE logical size
* PSIZE physical size (after compression)
* ASIZE allocated size (including RAID-Z parity and gang block headers)
* GRID RAID-Z layout information (reserved for future use)
* cksum checksum function
* comp compression function
* G gang block indicator
* B byteorder (endianness)
* D dedup
* X encryption
* E blkptr_t contains embedded data (see below)
* lvl level of indirection
* type DMU object type
* phys birth txg when dva[0] was written; zero if same as logical birth txg
* note that typically all the dva's would be written in this
* txg, but they could be different if they were moved by
* device removal.
* log. birth transaction group in which the block was logically born
* fill count number of non-zero blocks under this bp
* checksum[4] 256-bit checksum of the data this bp describes
*/
/*
* The blkptr_t's of encrypted blocks also need to store the encryption
* parameters so that the block can be decrypted. This layout is as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | vdev1 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 1 |G| offset1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 2 | vdev2 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 3 |G| offset2 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 4 | salt |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 5 | IV1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 8 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 9 | physical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | IV2 | fill count |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* c | checksum[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* d | checksum[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* e | MAC[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* f | MAC[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* salt Salt for generating encryption keys
* IV1 First 64 bits of encryption IV
* X Block requires encryption handling (set to 1)
* E blkptr_t contains embedded data (set to 0, see below)
* fill count number of non-zero blocks under this bp (truncated to 32 bits)
* IV2 Last 32 bits of encryption IV
* checksum[2] 128-bit checksum of the data this bp describes
* MAC[2] 128-bit message authentication code for this data
*
* The X bit being set indicates that this block is one of 3 types. If this is
* a level 0 block with an encrypted object type, the block is encrypted
* (see BP_IS_ENCRYPTED()). If this is a level 0 block with an unencrypted
* object type, this block is authenticated with an HMAC (see
* BP_IS_AUTHENTICATED()). Otherwise (if level > 0), this bp will use the MAC
* words to store a checksum-of-MACs from the level below (see
* BP_HAS_INDIRECT_MAC_CKSUM()). For convenience in the code, BP_IS_PROTECTED()
* refers to both encrypted and authenticated blocks and BP_USES_CRYPT()
* refers to any of these 3 kinds of blocks.
*
* The additional encryption parameters are the salt, IV, and MAC which are
* explained in greater detail in the block comment at the top of zio_crypt.c.
* The MAC occupies half of the checksum space since it serves a very similar
* purpose: to prevent data corruption on disk. The only functional difference
 * is that the checksum is used to detect on-disk corruption whether or not the
 * encryption key is loaded, while the MAC provides additional protection against
* malicious disk tampering. We use the 3rd DVA to store the salt and first
* 64 bits of the IV. As a result encrypted blocks can only have 2 copies
* maximum instead of the normal 3. The last 32 bits of the IV are stored in
* the upper bits of what is usually the fill count. Note that only blocks at
* level 0 or -2 are ever encrypted, which allows us to guarantee that these
* 32 bits are not trampled over by other code (see zio_crypt.c for details).
* The salt and IV are not used for authenticated bps or bps with an indirect
* MAC checksum, so these blocks can utilize all 3 DVAs and the full 64 bits
* for the fill count.
*/
/*
* "Embedded" blkptr_t's don't actually point to a block, instead they
* have a data payload embedded in the blkptr_t itself. See the comment
* in blkptr.c for more details.
*
* The blkptr_t is laid out as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | payload |
* 1 | payload |
* 2 | payload |
* 3 | payload |
* 4 | payload |
* 5 | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | etype |E| comp| PSIZE| LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | payload |
* 8 | payload |
* 9 | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | payload |
* c | payload |
* d | payload |
* e | payload |
* f | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* payload contains the embedded data
* B (byteorder) byteorder (endianness)
* D (dedup) padding (set to zero)
* X encryption (set to zero)
* E (embedded) set to one
* lvl indirection level
* type DMU object type
* etype how to interpret embedded data (BP_EMBEDDED_TYPE_*)
* comp compression function of payload
* PSIZE size of payload after compression, in bytes
* LSIZE logical size of payload, in bytes
* note that 25 bits is enough to store the largest
* "normal" BP's LSIZE (2^16 * 2^9) in bytes
* log. birth transaction group in which the block was logically born
*
* Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
* bp's they are stored in units of SPA_MINBLOCKSHIFT.
* Generally, the generic BP_GET_*() macros can be used on embedded BP's.
* The B, D, X, lvl, type, and comp fields are stored the same as with normal
* BP's so the BP_SET_* macros can be used with them. etype, PSIZE, LSIZE must
* be set with the BPE_SET_* macros. BP_SET_EMBEDDED() should be called before
* other macros, as they assert that they are only used on BP's of the correct
* "embedded-ness". Encrypted blkptr_t's cannot be embedded because they use
* the payload space for encryption parameters (see the comment above on
* how encryption parameters are stored).
*/
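/*
 * Illustrative sketch (not part of the original header) of the ordering
 * described above when constructing an embedded bp; lsize/psize are
 * hypothetical byte counts:
 *
 *	BP_ZERO(bp);
 *	BP_SET_EMBEDDED(bp, B_TRUE);		(must come first)
 *	BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
 *	BPE_SET_LSIZE(bp, lsize);		(bytes, not 512-byte units)
 *	BPE_SET_PSIZE(bp, psize);		(bytes, not 512-byte units)
 */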
#define BPE_GET_ETYPE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET((bp)->blk_prop, 40, 8))
#define BPE_SET_ETYPE(bp, t) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET((bp)->blk_prop, 40, 8, t); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define BPE_GET_LSIZE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
#define BPE_SET_LSIZE(bp, x) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define BPE_GET_PSIZE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
#define BPE_SET_PSIZE(bp, x) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
typedef enum bp_embedded_type {
BP_EMBEDDED_TYPE_DATA,
BP_EMBEDDED_TYPE_RESERVED, /* Reserved for Delphix byteswap feature. */
BP_EMBEDDED_TYPE_REDACTED,
NUM_BP_EMBEDDED_TYPES
} bp_embedded_type_t;
#define BPE_NUM_WORDS 14
#define BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
#define BPE_IS_PAYLOADWORD(bp, wp) \
((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)
#define SPA_BLKPTRSHIFT 7 /* blkptr_t is 128 bytes */
#define SPA_DVAS_PER_BP 3 /* Number of DVAs in a bp */
#define SPA_SYNC_MIN_VDEVS 3 /* min vdevs to update during sync */
/*
* A block is a hole when it has either 1) never been written to, or
* 2) is zero-filled. In both cases, ZFS can return all zeroes for all reads
* without physically allocating disk space. Holes are represented in the
* blkptr_t structure by zeroed blk_dva. Correct checking for holes is
 * done through the BP_IS_HOLE macro. For holes that were written to at some
 * point (i.e. were punched after having been filled), the logical size,
 * level, DMU object type, and birth times are also stored.
*/
typedef struct blkptr {
dva_t blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
uint64_t blk_prop; /* size, compression, type, etc */
uint64_t blk_pad[2]; /* Extra space for the future */
uint64_t blk_phys_birth; /* txg when block was allocated */
uint64_t blk_birth; /* transaction group at birth */
uint64_t blk_fill; /* fill count */
zio_cksum_t blk_cksum; /* 256-bit checksum */
} blkptr_t;
/*
* Macros to get and set fields in a bp or DVA.
*/
/*
* Note, for gang blocks, DVA_GET_ASIZE() is the total space allocated for
* this gang DVA including its children BP's. The space allocated at this
* DVA's vdev/offset is vdev_gang_header_asize(vdev).
*/
#define DVA_GET_ASIZE(dva) \
BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_ASIZE(dva, x) \
BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
SPA_MINBLOCKSHIFT, 0, x)
#define DVA_GET_GRID(dva) BF64_GET((dva)->dva_word[0], 24, 8)
#define DVA_SET_GRID(dva, x) BF64_SET((dva)->dva_word[0], 24, 8, x)
#define DVA_GET_VDEV(dva) BF64_GET((dva)->dva_word[0], 32, SPA_VDEVBITS)
#define DVA_SET_VDEV(dva, x) \
BF64_SET((dva)->dva_word[0], 32, SPA_VDEVBITS, x)
#define DVA_GET_OFFSET(dva) \
BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_OFFSET(dva, x) \
BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)
#define DVA_GET_GANG(dva) BF64_GET((dva)->dva_word[1], 63, 1)
#define DVA_SET_GANG(dva, x) BF64_SET((dva)->dva_word[1], 63, 1, x)
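/*
 * Illustrative sketch (not part of the original header): decoding the
 * first copy of a block with the DVA accessors above:
 *
 *	const dva_t *dva = &bp->blk_dva[0];
 *	uint64_t vdev   = DVA_GET_VDEV(dva);	(top-level vdev id)
 *	uint64_t offset = DVA_GET_OFFSET(dva);	(byte offset on that vdev)
 *	uint64_t asize  = DVA_GET_ASIZE(dva);	(allocated bytes, incl. parity)
 */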
#define BP_GET_LSIZE(bp) \
(BP_IS_EMBEDDED(bp) ? \
(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_LSIZE(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, \
0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define BP_GET_PSIZE(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
BF64_GET_SB((bp)->blk_prop, 16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_PSIZE(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, \
16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define BP_GET_COMPRESS(bp) \
BF64_GET((bp)->blk_prop, 32, SPA_COMPRESSBITS)
#define BP_SET_COMPRESS(bp, x) \
BF64_SET((bp)->blk_prop, 32, SPA_COMPRESSBITS, x)
#define BP_IS_EMBEDDED(bp) BF64_GET((bp)->blk_prop, 39, 1)
#define BP_SET_EMBEDDED(bp, x) BF64_SET((bp)->blk_prop, 39, 1, x)
#define BP_GET_CHECKSUM(bp) \
(BP_IS_EMBEDDED(bp) ? ZIO_CHECKSUM_OFF : \
BF64_GET((bp)->blk_prop, 40, 8))
#define BP_SET_CHECKSUM(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET((bp)->blk_prop, 40, 8, x); \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#define BP_GET_TYPE(bp) BF64_GET((bp)->blk_prop, 48, 8)
#define BP_SET_TYPE(bp, x) BF64_SET((bp)->blk_prop, 48, 8, x)
#define BP_GET_LEVEL(bp) BF64_GET((bp)->blk_prop, 56, 5)
#define BP_SET_LEVEL(bp, x) BF64_SET((bp)->blk_prop, 56, 5, x)
/* encrypted, authenticated, and MAC cksum bps use the same bit */
#define BP_USES_CRYPT(bp) BF64_GET((bp)->blk_prop, 61, 1)
#define BP_SET_CRYPT(bp, x) BF64_SET((bp)->blk_prop, 61, 1, x)
#define BP_IS_ENCRYPTED(bp) \
(BP_USES_CRYPT(bp) && \
BP_GET_LEVEL(bp) <= 0 && \
DMU_OT_IS_ENCRYPTED(BP_GET_TYPE(bp)))
#define BP_IS_AUTHENTICATED(bp) \
(BP_USES_CRYPT(bp) && \
BP_GET_LEVEL(bp) <= 0 && \
!DMU_OT_IS_ENCRYPTED(BP_GET_TYPE(bp)))
#define BP_HAS_INDIRECT_MAC_CKSUM(bp) \
(BP_USES_CRYPT(bp) && BP_GET_LEVEL(bp) > 0)
#define BP_IS_PROTECTED(bp) \
(BP_IS_ENCRYPTED(bp) || BP_IS_AUTHENTICATED(bp))
#define BP_GET_DEDUP(bp) BF64_GET((bp)->blk_prop, 62, 1)
#define BP_SET_DEDUP(bp, x) BF64_SET((bp)->blk_prop, 62, 1, x)
#define BP_GET_BYTEORDER(bp) BF64_GET((bp)->blk_prop, 63, 1)
#define BP_SET_BYTEORDER(bp, x) BF64_SET((bp)->blk_prop, 63, 1, x)
#define BP_GET_FREE(bp) BF64_GET((bp)->blk_fill, 0, 1)
#define BP_SET_FREE(bp, x) BF64_SET((bp)->blk_fill, 0, 1, x)
#define BP_PHYSICAL_BIRTH(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
(bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)
#define BP_SET_BIRTH(bp, logical, physical) \
{ \
ASSERT(!BP_IS_EMBEDDED(bp)); \
(bp)->blk_birth = (logical); \
(bp)->blk_phys_birth = ((logical) == (physical) ? 0 : (physical)); \
}
#define BP_GET_FILL(bp) \
((BP_IS_ENCRYPTED(bp)) ? BF64_GET((bp)->blk_fill, 0, 32) : \
((BP_IS_EMBEDDED(bp)) ? 1 : (bp)->blk_fill))
#define BP_SET_FILL(bp, fill) \
{ \
if (BP_IS_ENCRYPTED(bp)) \
BF64_SET((bp)->blk_fill, 0, 32, fill); \
else \
(bp)->blk_fill = fill; \
}
#define BP_GET_IV2(bp) \
(ASSERT(BP_IS_ENCRYPTED(bp)), \
BF64_GET((bp)->blk_fill, 32, 32))
#define BP_SET_IV2(bp, iv2) \
{ \
ASSERT(BP_IS_ENCRYPTED(bp)); \
BF64_SET((bp)->blk_fill, 32, 32, iv2); \
}
#define BP_IS_METADATA(bp) \
(BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
#define BP_GET_ASIZE(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
(DVA_GET_ASIZE(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp)))
#define BP_GET_UCSIZE(bp) \
(BP_IS_METADATA(bp) ? BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))
#define BP_GET_NDVAS(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
!!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
(!!DVA_GET_ASIZE(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp)))
#define BP_COUNT_GANG(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
(DVA_GET_GANG(&(bp)->blk_dva[0]) + \
DVA_GET_GANG(&(bp)->blk_dva[1]) + \
(DVA_GET_GANG(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp))))
#define DVA_EQUAL(dva1, dva2) \
((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
(dva1)->dva_word[0] == (dva2)->dva_word[0])
#define BP_EQUAL(bp1, bp2) \
(BP_PHYSICAL_BIRTH(bp1) == BP_PHYSICAL_BIRTH(bp2) && \
(bp1)->blk_birth == (bp2)->blk_birth && \
DVA_EQUAL(&(bp1)->blk_dva[0], &(bp2)->blk_dva[0]) && \
DVA_EQUAL(&(bp1)->blk_dva[1], &(bp2)->blk_dva[1]) && \
DVA_EQUAL(&(bp1)->blk_dva[2], &(bp2)->blk_dva[2]))
#define DVA_IS_VALID(dva) (DVA_GET_ASIZE(dva) != 0)
#define BP_IDENTITY(bp) (ASSERT(!BP_IS_EMBEDDED(bp)), &(bp)->blk_dva[0])
#define BP_IS_GANG(bp) \
(BP_IS_EMBEDDED(bp) ? B_FALSE : DVA_GET_GANG(BP_IDENTITY(bp)))
#define DVA_IS_EMPTY(dva) ((dva)->dva_word[0] == 0ULL && \
(dva)->dva_word[1] == 0ULL)
#define BP_IS_HOLE(bp) \
(!BP_IS_EMBEDDED(bp) && DVA_IS_EMPTY(BP_IDENTITY(bp)))
#define BP_SET_REDACTED(bp) \
{ \
BP_SET_EMBEDDED(bp, B_TRUE); \
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_REDACTED); \
}
#define BP_IS_REDACTED(bp) \
(BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_REDACTED)
/* BP_IS_RAIDZ(bp) assumes no block compression */
#define BP_IS_RAIDZ(bp) (DVA_GET_ASIZE(&(bp)->blk_dva[0]) > \
BP_GET_PSIZE(bp))
#define BP_ZERO(bp) \
{ \
(bp)->blk_dva[0].dva_word[0] = 0; \
(bp)->blk_dva[0].dva_word[1] = 0; \
(bp)->blk_dva[1].dva_word[0] = 0; \
(bp)->blk_dva[1].dva_word[1] = 0; \
(bp)->blk_dva[2].dva_word[0] = 0; \
(bp)->blk_dva[2].dva_word[1] = 0; \
(bp)->blk_prop = 0; \
(bp)->blk_pad[0] = 0; \
(bp)->blk_pad[1] = 0; \
(bp)->blk_phys_birth = 0; \
(bp)->blk_birth = 0; \
(bp)->blk_fill = 0; \
ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \
}
#ifdef _ZFS_BIG_ENDIAN
#define ZFS_HOST_BYTEORDER (0ULL)
#else
#define ZFS_HOST_BYTEORDER (1ULL)
#endif
#define BP_SHOULD_BYTESWAP(bp) (BP_GET_BYTEORDER(bp) != ZFS_HOST_BYTEORDER)
#define BP_SPRINTF_LEN 400
/*
* This macro allows code sharing between zfs, libzpool, and mdb.
* 'func' is either snprintf() or mdb_snprintf().
* 'ws' (whitespace) can be ' ' for single-line format, '\n' for multi-line.
*/
#define SNPRINTF_BLKPTR(func, ws, buf, size, bp, type, checksum, compress) \
{ \
static const char *copyname[] = \
{ "zero", "single", "double", "triple" }; \
int len = 0; \
int copies = 0; \
const char *crypt_type; \
if (bp != NULL) { \
if (BP_IS_ENCRYPTED(bp)) { \
crypt_type = "encrypted"; \
/* LINTED E_SUSPICIOUS_COMPARISON */ \
} else if (BP_IS_AUTHENTICATED(bp)) { \
crypt_type = "authenticated"; \
} else if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) { \
crypt_type = "indirect-MAC"; \
} else { \
crypt_type = "unencrypted"; \
} \
} \
if (bp == NULL) { \
len += func(buf + len, size - len, "<NULL>"); \
} else if (BP_IS_HOLE(bp)) { \
len += func(buf + len, size - len, \
"HOLE [L%llu %s] " \
"size=%llxL birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else if (BP_IS_EMBEDDED(bp)) { \
len = func(buf + len, size - len, \
"EMBEDDED [L%llu %s] et=%u %s " \
"size=%llxL/%llxP birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(int)BPE_GET_ETYPE(bp), \
compress, \
(u_longlong_t)BPE_GET_LSIZE(bp), \
(u_longlong_t)BPE_GET_PSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else if (BP_IS_REDACTED(bp)) { \
len += func(buf + len, size - len, \
"REDACTED [L%llu %s] size=%llxL birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else { \
for (int d = 0; d < BP_GET_NDVAS(bp); d++) { \
const dva_t *dva = &bp->blk_dva[d]; \
if (DVA_IS_VALID(dva)) \
copies++; \
len += func(buf + len, size - len, \
"DVA[%d]=<%llu:%llx:%llx>%c", d, \
(u_longlong_t)DVA_GET_VDEV(dva), \
(u_longlong_t)DVA_GET_OFFSET(dva), \
(u_longlong_t)DVA_GET_ASIZE(dva), \
ws); \
} \
if (BP_IS_ENCRYPTED(bp)) { \
len += func(buf + len, size - len, \
"salt=%llx iv=%llx:%llx%c", \
(u_longlong_t)bp->blk_dva[2].dva_word[0], \
(u_longlong_t)bp->blk_dva[2].dva_word[1], \
(u_longlong_t)BP_GET_IV2(bp), \
ws); \
} \
if (BP_IS_GANG(bp) && \
DVA_GET_ASIZE(&bp->blk_dva[2]) <= \
DVA_GET_ASIZE(&bp->blk_dva[1]) / 2) \
copies--; \
len += func(buf + len, size - len, \
"[L%llu %s] %s %s %s %s %s %s %s%c" \
"size=%llxL/%llxP birth=%lluL/%lluP fill=%llu%c" \
"cksum=%llx:%llx:%llx:%llx", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
checksum, \
compress, \
crypt_type, \
BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE", \
BP_IS_GANG(bp) ? "gang" : "contiguous", \
BP_GET_DEDUP(bp) ? "dedup" : "unique", \
copyname[copies], \
ws, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)BP_GET_PSIZE(bp), \
(u_longlong_t)bp->blk_birth, \
(u_longlong_t)BP_PHYSICAL_BIRTH(bp), \
(u_longlong_t)BP_GET_FILL(bp), \
ws, \
(u_longlong_t)bp->blk_cksum.zc_word[0], \
(u_longlong_t)bp->blk_cksum.zc_word[1], \
(u_longlong_t)bp->blk_cksum.zc_word[2], \
(u_longlong_t)bp->blk_cksum.zc_word[3]); \
} \
ASSERT(len < size); \
}
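/*
 * Illustrative usage (not part of the original header); the three trailing
 * strings are placeholder names normally derived from the bp's type,
 * checksum, and compression settings:
 *
 *	char blkbuf[BP_SPRINTF_LEN];
 *	SNPRINTF_BLKPTR(snprintf, ' ', blkbuf, sizeof (blkbuf), bp,
 *	    "objset", "fletcher4", "lz4");
 */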
#define BP_GET_BUFC_TYPE(bp) \
(BP_IS_METADATA(bp) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)
typedef enum spa_import_type {
SPA_IMPORT_EXISTING,
SPA_IMPORT_ASSEMBLE
} spa_import_type_t;
typedef enum spa_mode {
SPA_MODE_UNINIT = 0,
SPA_MODE_READ = 1,
SPA_MODE_WRITE = 2,
} spa_mode_t;
/*
* Send TRIM commands in-line during normal pool operation while deleting.
* OFF: no
* ON: yes
* NB: IN_FREEBSD_BASE is defined within the FreeBSD sources.
*/
typedef enum {
SPA_AUTOTRIM_OFF = 0, /* default */
SPA_AUTOTRIM_ON,
#ifdef IN_FREEBSD_BASE
SPA_AUTOTRIM_DEFAULT = SPA_AUTOTRIM_ON,
#else
SPA_AUTOTRIM_DEFAULT = SPA_AUTOTRIM_OFF,
#endif
} spa_autotrim_t;
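/*
 * Illustrative note (not part of the original header): the default above is
 * overridden per pool at runtime via the autotrim pool property, e.g.
 * "zpool set autotrim=on <pool>".
 */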
/*
* Reason TRIM command was issued, used internally for accounting purposes.
*/
typedef enum trim_type {
TRIM_TYPE_MANUAL = 0,
TRIM_TYPE_AUTO = 1,
TRIM_TYPE_SIMPLE = 2
} trim_type_t;
/* state manipulation functions */
extern int spa_open(const char *pool, spa_t **, void *tag);
extern int spa_open_rewind(const char *pool, spa_t **, void *tag,
nvlist_t *policy, nvlist_t **config);
extern int spa_get_stats(const char *pool, nvlist_t **config, char *altroot,
size_t buflen);
extern int spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, struct dsl_crypto_params *dcp);
extern int spa_import(char *pool, nvlist_t *config, nvlist_t *props,
uint64_t flags);
extern nvlist_t *spa_tryimport(nvlist_t *tryconfig);
extern int spa_destroy(const char *pool);
extern int spa_checkpoint(const char *pool);
extern int spa_checkpoint_discard(const char *pool);
extern int spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce);
extern int spa_reset(const char *pool);
extern void spa_async_request(spa_t *spa, int flag);
extern void spa_async_unrequest(spa_t *spa, int flag);
extern void spa_async_suspend(spa_t *spa);
extern void spa_async_resume(spa_t *spa);
extern int spa_async_tasks(spa_t *spa);
extern spa_t *spa_inject_addref(char *pool);
extern void spa_inject_delref(spa_t *spa);
extern void spa_scan_stat_init(spa_t *spa);
extern int spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps);
extern int bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx);
extern int bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx);
#define SPA_ASYNC_CONFIG_UPDATE 0x01
#define SPA_ASYNC_REMOVE 0x02
#define SPA_ASYNC_PROBE 0x04
#define SPA_ASYNC_RESILVER_DONE 0x08
#define SPA_ASYNC_RESILVER 0x10
#define SPA_ASYNC_AUTOEXPAND 0x20
#define SPA_ASYNC_REMOVE_DONE 0x40
#define SPA_ASYNC_REMOVE_STOP 0x80
#define SPA_ASYNC_INITIALIZE_RESTART 0x100
#define SPA_ASYNC_TRIM_RESTART 0x200
#define SPA_ASYNC_AUTOTRIM_RESTART 0x400
#define SPA_ASYNC_L2CACHE_REBUILD 0x800
#define SPA_ASYNC_L2CACHE_TRIM 0x1000
#define SPA_ASYNC_REBUILD_DONE 0x2000
/* device manipulation */
extern int spa_vdev_add(spa_t *spa, nvlist_t *nvroot);
extern int spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot,
int replacing, int rebuild);
extern int spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid,
int replace_done);
extern int spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare);
extern boolean_t spa_vdev_remove_active(spa_t *spa);
extern int spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist);
extern int spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist);
extern int spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath);
extern int spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru);
extern int spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp);
/* spare state (which is global across all pools) */
extern void spa_spare_add(vdev_t *vd);
extern void spa_spare_remove(vdev_t *vd);
extern boolean_t spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt);
extern void spa_spare_activate(vdev_t *vd);
/* L2ARC state (which is global across all pools) */
extern void spa_l2cache_add(vdev_t *vd);
extern void spa_l2cache_remove(vdev_t *vd);
extern boolean_t spa_l2cache_exists(uint64_t guid, uint64_t *pool);
extern void spa_l2cache_activate(vdev_t *vd);
extern void spa_l2cache_drop(spa_t *spa);
/* scanning */
extern int spa_scan(spa_t *spa, pool_scan_func_t func);
extern int spa_scan_stop(spa_t *spa);
extern int spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t flag);
/* spa syncing */
extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */
extern void spa_sync_allpools(void);
extern int zfs_sync_pass_deferred_free;
/* spa namespace global mutex */
extern kmutex_t spa_namespace_lock;
/*
* SPA configuration functions in spa_config.c
*/
#define SPA_CONFIG_UPDATE_POOL 0
#define SPA_CONFIG_UPDATE_VDEVS 1
extern void spa_write_cachefile(spa_t *, boolean_t, boolean_t);
extern void spa_config_load(void);
extern nvlist_t *spa_all_configs(uint64_t *);
extern void spa_config_set(spa_t *spa, nvlist_t *config);
extern nvlist_t *spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg,
int getstats);
extern void spa_config_update(spa_t *spa, int what);
extern int spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv,
vdev_t *parent, uint_t id, int atype);
/*
* Miscellaneous SPA routines in spa_misc.c
*/
/* Namespace manipulation */
extern spa_t *spa_lookup(const char *name);
extern spa_t *spa_add(const char *name, nvlist_t *config, const char *altroot);
extern void spa_remove(spa_t *spa);
extern spa_t *spa_next(spa_t *prev);
/* Refcount functions */
extern void spa_open_ref(spa_t *spa, void *tag);
extern void spa_close(spa_t *spa, void *tag);
extern void spa_async_close(spa_t *spa, void *tag);
extern boolean_t spa_refcount_zero(spa_t *spa);
#define SCL_NONE 0x00
#define SCL_CONFIG 0x01
#define SCL_STATE 0x02
#define SCL_L2ARC 0x04 /* hack until L2ARC 2.0 */
#define SCL_ALLOC 0x08
#define SCL_ZIO 0x10
#define SCL_FREE 0x20
#define SCL_VDEV 0x40
#define SCL_LOCKS 7
#define SCL_ALL ((1 << SCL_LOCKS) - 1)
#define SCL_STATE_ALL (SCL_STATE | SCL_L2ARC | SCL_ZIO)
/* Historical pool statistics */
typedef struct spa_history_kstat {
kmutex_t lock;
uint64_t count;
uint64_t size;
kstat_t *kstat;
void *priv;
list_t list;
} spa_history_kstat_t;
typedef struct spa_history_list {
uint64_t size;
procfs_list_t procfs_list;
} spa_history_list_t;
typedef struct spa_stats {
spa_history_list_t read_history;
spa_history_list_t txg_history;
spa_history_kstat_t tx_assign_histogram;
spa_history_list_t mmp_history;
spa_history_kstat_t state; /* pool state */
spa_history_kstat_t iostats;
} spa_stats_t;
typedef enum txg_state {
TXG_STATE_BIRTH = 0,
TXG_STATE_OPEN = 1,
TXG_STATE_QUIESCED = 2,
TXG_STATE_WAIT_FOR_SYNC = 3,
TXG_STATE_SYNCED = 4,
TXG_STATE_COMMITTED = 5,
} txg_state_t;
typedef struct txg_stat {
vdev_stat_t vs1;
vdev_stat_t vs2;
uint64_t txg;
uint64_t ndirty;
} txg_stat_t;
/* Assorted pool IO kstats */
typedef struct spa_iostats {
kstat_named_t trim_extents_written;
kstat_named_t trim_bytes_written;
kstat_named_t trim_extents_skipped;
kstat_named_t trim_bytes_skipped;
kstat_named_t trim_extents_failed;
kstat_named_t trim_bytes_failed;
kstat_named_t autotrim_extents_written;
kstat_named_t autotrim_bytes_written;
kstat_named_t autotrim_extents_skipped;
kstat_named_t autotrim_bytes_skipped;
kstat_named_t autotrim_extents_failed;
kstat_named_t autotrim_bytes_failed;
kstat_named_t simple_trim_extents_written;
kstat_named_t simple_trim_bytes_written;
kstat_named_t simple_trim_extents_skipped;
kstat_named_t simple_trim_bytes_skipped;
kstat_named_t simple_trim_extents_failed;
kstat_named_t simple_trim_bytes_failed;
} spa_iostats_t;
extern void spa_stats_init(spa_t *spa);
extern void spa_stats_destroy(spa_t *spa);
extern void spa_read_history_add(spa_t *spa, const zbookmark_phys_t *zb,
uint32_t aflags);
extern void spa_txg_history_add(spa_t *spa, uint64_t txg, hrtime_t birth_time);
extern int spa_txg_history_set(spa_t *spa, uint64_t txg,
txg_state_t completed_state, hrtime_t completed_time);
extern txg_stat_t *spa_txg_history_init_io(spa_t *, uint64_t,
struct dsl_pool *);
extern void spa_txg_history_fini_io(spa_t *, txg_stat_t *);
extern void spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs);
extern int spa_mmp_history_set_skip(spa_t *spa, uint64_t mmp_kstat_id);
extern int spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error,
hrtime_t duration);
extern void spa_mmp_history_add(spa_t *spa, uint64_t txg, uint64_t timestamp,
uint64_t mmp_delay, vdev_t *vd, int label, uint64_t mmp_kstat_id,
int error);
extern void spa_iostats_trim_add(spa_t *spa, trim_type_t type,
uint64_t extents_written, uint64_t bytes_written,
uint64_t extents_skipped, uint64_t bytes_skipped,
uint64_t extents_failed, uint64_t bytes_failed);
extern void spa_import_progress_add(spa_t *spa);
extern void spa_import_progress_remove(uint64_t spa_guid);
extern int spa_import_progress_set_mmp_check(uint64_t pool_guid,
uint64_t mmp_sec_remaining);
extern int spa_import_progress_set_max_txg(uint64_t pool_guid,
uint64_t max_txg);
extern int spa_import_progress_set_state(uint64_t pool_guid,
spa_load_state_t spa_load_state);
/* Pool configuration locks */
extern int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw);
extern void spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw);
extern void spa_config_exit(spa_t *spa, int locks, const void *tag);
extern int spa_config_held(spa_t *spa, int locks, krw_t rw);
/* Pool vdev add/remove lock */
extern uint64_t spa_vdev_enter(spa_t *spa);
extern uint64_t spa_vdev_detach_enter(spa_t *spa, uint64_t guid);
extern uint64_t spa_vdev_config_enter(spa_t *spa);
extern void spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg,
int error, char *tag);
extern int spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error);
/* Pool vdev state change lock */
extern void spa_vdev_state_enter(spa_t *spa, int oplock);
extern int spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error);
/* Log state */
typedef enum spa_log_state {
SPA_LOG_UNKNOWN = 0, /* unknown log state */
SPA_LOG_MISSING, /* missing log(s) */
SPA_LOG_CLEAR, /* clear the log(s) */
SPA_LOG_GOOD, /* log(s) are good */
} spa_log_state_t;
extern spa_log_state_t spa_get_log_state(spa_t *spa);
extern void spa_set_log_state(spa_t *spa, spa_log_state_t state);
extern int spa_reset_logs(spa_t *spa);
/* Log claim callback */
extern void spa_claim_notify(zio_t *zio);
extern void spa_deadman(void *);
/* Accessor functions */
extern boolean_t spa_shutting_down(spa_t *spa);
extern struct dsl_pool *spa_get_dsl(spa_t *spa);
extern boolean_t spa_is_initializing(spa_t *spa);
extern boolean_t spa_indirect_vdevs_loaded(spa_t *spa);
extern blkptr_t *spa_get_rootblkptr(spa_t *spa);
extern void spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp);
extern void spa_altroot(spa_t *, char *, size_t);
extern int spa_sync_pass(spa_t *spa);
extern char *spa_name(spa_t *spa);
extern uint64_t spa_guid(spa_t *spa);
extern uint64_t spa_load_guid(spa_t *spa);
extern uint64_t spa_last_synced_txg(spa_t *spa);
extern uint64_t spa_first_txg(spa_t *spa);
extern uint64_t spa_syncing_txg(spa_t *spa);
extern uint64_t spa_final_dirty_txg(spa_t *spa);
extern uint64_t spa_version(spa_t *spa);
extern pool_state_t spa_state(spa_t *spa);
extern spa_load_state_t spa_load_state(spa_t *spa);
extern uint64_t spa_freeze_txg(spa_t *spa);
extern uint64_t spa_get_worst_case_asize(spa_t *spa, uint64_t lsize);
extern uint64_t spa_get_dspace(spa_t *spa);
extern uint64_t spa_get_checkpoint_space(spa_t *spa);
extern uint64_t spa_get_slop_space(spa_t *spa);
extern void spa_update_dspace(spa_t *spa);
extern uint64_t spa_version(spa_t *spa);
extern boolean_t spa_deflate(spa_t *spa);
extern metaslab_class_t *spa_normal_class(spa_t *spa);
extern metaslab_class_t *spa_log_class(spa_t *spa);
extern metaslab_class_t *spa_embedded_log_class(spa_t *spa);
extern metaslab_class_t *spa_special_class(spa_t *spa);
extern metaslab_class_t *spa_dedup_class(spa_t *spa);
extern metaslab_class_t *spa_preferred_class(spa_t *spa, uint64_t size,
dmu_object_type_t objtype, uint_t level, uint_t special_smallblk);
extern void spa_evicting_os_register(spa_t *, objset_t *os);
extern void spa_evicting_os_deregister(spa_t *, objset_t *os);
extern void spa_evicting_os_wait(spa_t *spa);
extern int spa_max_replication(spa_t *spa);
extern int spa_prev_software_version(spa_t *spa);
extern uint64_t spa_get_failmode(spa_t *spa);
extern uint64_t spa_get_deadman_failmode(spa_t *spa);
extern void spa_set_deadman_failmode(spa_t *spa, const char *failmode);
extern boolean_t spa_suspended(spa_t *spa);
extern uint64_t spa_bootfs(spa_t *spa);
extern uint64_t spa_delegation(spa_t *spa);
extern objset_t *spa_meta_objset(spa_t *spa);
extern space_map_t *spa_syncing_log_sm(spa_t *spa);
extern uint64_t spa_deadman_synctime(spa_t *spa);
extern uint64_t spa_deadman_ziotime(spa_t *spa);
extern uint64_t spa_dirty_data(spa_t *spa);
extern spa_autotrim_t spa_get_autotrim(spa_t *spa);
/* Miscellaneous support routines */
-extern void spa_load_failed(spa_t *spa, const char *fmt, ...);
-extern void spa_load_note(spa_t *spa, const char *fmt, ...);
+extern void spa_load_failed(spa_t *spa, const char *fmt, ...)
+ __attribute__((format(printf, 2, 3)));
+extern void spa_load_note(spa_t *spa, const char *fmt, ...)
+ __attribute__((format(printf, 2, 3)));
extern void spa_activate_mos_feature(spa_t *spa, const char *feature,
dmu_tx_t *tx);
extern void spa_deactivate_mos_feature(spa_t *spa, const char *feature);
extern spa_t *spa_by_guid(uint64_t pool_guid, uint64_t device_guid);
extern boolean_t spa_guid_exists(uint64_t pool_guid, uint64_t device_guid);
extern char *spa_strdup(const char *);
extern void spa_strfree(char *);
extern uint64_t spa_generate_guid(spa_t *spa);
extern void snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp);
extern void spa_freeze(spa_t *spa);
extern int spa_change_guid(spa_t *spa);
extern void spa_upgrade(spa_t *spa, uint64_t version);
extern void spa_evict_all(void);
extern vdev_t *spa_lookup_by_guid(spa_t *spa, uint64_t guid,
boolean_t l2cache);
extern boolean_t spa_has_spare(spa_t *, uint64_t guid);
extern uint64_t dva_get_dsize_sync(spa_t *spa, const dva_t *dva);
extern uint64_t bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp);
extern uint64_t bp_get_dsize(spa_t *spa, const blkptr_t *bp);
extern boolean_t spa_has_slogs(spa_t *spa);
extern boolean_t spa_is_root(spa_t *spa);
extern boolean_t spa_writeable(spa_t *spa);
extern boolean_t spa_has_pending_synctask(spa_t *spa);
extern int spa_maxblocksize(spa_t *spa);
extern int spa_maxdnodesize(spa_t *spa);
extern boolean_t spa_has_checkpoint(spa_t *spa);
extern boolean_t spa_importing_readonly_checkpoint(spa_t *spa);
extern boolean_t spa_suspend_async_destroy(spa_t *spa);
extern uint64_t spa_min_claim_txg(spa_t *spa);
extern boolean_t zfs_dva_valid(spa_t *spa, const dva_t *dva,
const blkptr_t *bp);
typedef void (*spa_remap_cb_t)(uint64_t vdev, uint64_t offset, uint64_t size,
void *arg);
extern boolean_t spa_remap_blkptr(spa_t *spa, blkptr_t *bp,
spa_remap_cb_t callback, void *arg);
extern uint64_t spa_get_last_removal_txg(spa_t *spa);
extern boolean_t spa_trust_config(spa_t *spa);
extern uint64_t spa_missing_tvds_allowed(spa_t *spa);
extern void spa_set_missing_tvds(spa_t *spa, uint64_t missing);
extern boolean_t spa_top_vdevs_spacemap_addressable(spa_t *spa);
extern uint64_t spa_total_metaslabs(spa_t *spa);
extern boolean_t spa_multihost(spa_t *spa);
extern uint32_t spa_get_hostid(spa_t *spa);
extern void spa_activate_allocation_classes(spa_t *, dmu_tx_t *);
extern boolean_t spa_livelist_delete_check(spa_t *spa);
extern spa_mode_t spa_mode(spa_t *spa);
extern uint64_t zfs_strtonum(const char *str, char **nptr);
extern char *spa_his_ievent_table[];
extern void spa_history_create_obj(spa_t *spa, dmu_tx_t *tx);
extern int spa_history_get(spa_t *spa, uint64_t *offset, uint64_t *len_read,
char *his_buf);
extern int spa_history_log(spa_t *spa, const char *his_buf);
extern int spa_history_log_nvl(spa_t *spa, nvlist_t *nvl);
extern void spa_history_log_version(spa_t *spa, const char *operation,
dmu_tx_t *tx);
extern void spa_history_log_internal(spa_t *spa, const char *operation,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern void spa_history_log_internal_ds(struct dsl_dataset *ds, const char *op,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern void spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern const char *spa_state_to_name(spa_t *spa);
/* error handling */
struct zbookmark_phys;
extern void spa_log_error(spa_t *spa, const zbookmark_phys_t *zb);
extern int zfs_ereport_post(const char *clazz, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t state);
extern boolean_t zfs_ereport_is_valid(const char *clazz, spa_t *spa, vdev_t *vd,
zio_t *zio);
extern void zfs_ereport_taskq_fini(void);
extern void zfs_ereport_clear(spa_t *spa, vdev_t *vd);
extern nvlist_t *zfs_event_create(spa_t *spa, vdev_t *vd, const char *type,
const char *name, nvlist_t *aux);
extern void zfs_post_remove(spa_t *spa, vdev_t *vd);
extern void zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate);
extern void zfs_post_autoreplace(spa_t *spa, vdev_t *vd);
extern uint64_t spa_get_errlog_size(spa_t *spa);
extern int spa_get_errlog(spa_t *spa, void *uaddr, size_t *count);
extern void spa_errlog_rotate(spa_t *spa);
extern void spa_errlog_drain(spa_t *spa);
extern void spa_errlog_sync(spa_t *spa, uint64_t txg);
extern void spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub);
/* vdev cache */
extern void vdev_cache_stat_init(void);
extern void vdev_cache_stat_fini(void);
/* vdev mirror */
extern void vdev_mirror_stat_init(void);
extern void vdev_mirror_stat_fini(void);
/* Initialization and termination */
extern void spa_init(spa_mode_t mode);
extern void spa_fini(void);
extern void spa_boot_init(void);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
extern int spa_prop_get(spa_t *spa, nvlist_t **nvp);
extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx);
extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
/* asynchronous event notification */
extern void spa_event_notify(spa_t *spa, vdev_t *vdev, nvlist_t *hist_nvl,
const char *name);
/* waiting for pool activities to complete */
extern int spa_wait(const char *pool, zpool_wait_activity_t activity,
boolean_t *waited);
extern int spa_wait_tag(const char *name, zpool_wait_activity_t activity,
uint64_t tag, boolean_t *waited);
extern void spa_notify_waiters(spa_t *spa);
extern void spa_wake_waiters(spa_t *spa);
/* module param call functions */
int param_set_deadman_ziotime(ZFS_MODULE_PARAM_ARGS);
int param_set_deadman_synctime(ZFS_MODULE_PARAM_ARGS);
int param_set_slop_shift(ZFS_MODULE_PARAM_ARGS);
int param_set_deadman_failmode(ZFS_MODULE_PARAM_ARGS);
#ifdef ZFS_DEBUG
#define dprintf_bp(bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, (bp)); \
dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
-_NOTE(CONSTCOND) } while (0)
+} while (0)
#else
#define dprintf_bp(bp, fmt, ...)
#endif
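/*
 * Illustrative usage (not part of the original header); "dsobj" is a
 * hypothetical variable, and the macro appends the rendered bp itself:
 *
 *	dprintf_bp(bp, "ds=%llu", (u_longlong_t)dsobj);
 */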
extern spa_mode_t spa_mode_global;
extern int zfs_deadman_enabled;
extern unsigned long zfs_deadman_synctime_ms;
extern unsigned long zfs_deadman_ziotime_ms;
extern unsigned long zfs_deadman_checktime_ms;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPA_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev.h b/sys/contrib/openzfs/include/sys/vdev.h
index f235bfc8cc19..0a81713a44d0 100644
--- a/sys/contrib/openzfs/include/sys/vdev.h
+++ b/sys/contrib/openzfs/include/sys/vdev.h
@@ -1,225 +1,226 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
*/
#ifndef _SYS_VDEV_H
#define _SYS_VDEV_H
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu.h>
#include <sys/space_map.h>
#include <sys/metaslab.h>
#include <sys/fs/zfs.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef enum vdev_dtl_type {
DTL_MISSING, /* 0% replication: no copies of the data */
DTL_PARTIAL, /* less than 100% replication: some copies missing */
DTL_SCRUB, /* unable to fully repair during scrub/resilver */
DTL_OUTAGE, /* temporarily missing (used to attempt detach) */
DTL_TYPES
} vdev_dtl_type_t;
extern int zfs_nocacheflush;
typedef boolean_t vdev_open_children_func_t(vdev_t *vd);
-extern void vdev_dbgmsg(vdev_t *vd, const char *fmt, ...);
+extern void vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
+ __attribute__((format(printf, 2, 3)));
extern void vdev_dbgmsg_print_tree(vdev_t *, int);
extern int vdev_open(vdev_t *);
extern void vdev_open_children(vdev_t *);
extern void vdev_open_children_subset(vdev_t *, vdev_open_children_func_t *);
extern int vdev_validate(vdev_t *);
extern int vdev_copy_path_strict(vdev_t *, vdev_t *);
extern void vdev_copy_path_relaxed(vdev_t *, vdev_t *);
extern void vdev_close(vdev_t *);
extern int vdev_create(vdev_t *, uint64_t txg, boolean_t isreplace);
extern void vdev_reopen(vdev_t *);
extern int vdev_validate_aux(vdev_t *vd);
extern zio_t *vdev_probe(vdev_t *vd, zio_t *pio);
extern boolean_t vdev_is_concrete(vdev_t *vd);
extern boolean_t vdev_is_bootable(vdev_t *vd);
extern vdev_t *vdev_lookup_top(spa_t *spa, uint64_t vdev);
extern vdev_t *vdev_lookup_by_guid(vdev_t *vd, uint64_t guid);
extern int vdev_count_leaves(spa_t *spa);
extern void vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t d,
uint64_t txg, uint64_t size);
extern boolean_t vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t d,
uint64_t txg, uint64_t size);
extern boolean_t vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t d);
extern boolean_t vdev_default_need_resilver(vdev_t *vd, const dva_t *dva,
size_t psize, uint64_t phys_birth);
extern boolean_t vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva,
size_t psize, uint64_t phys_birth);
extern void vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done);
extern boolean_t vdev_dtl_required(vdev_t *vd);
extern boolean_t vdev_resilver_needed(vdev_t *vd,
uint64_t *minp, uint64_t *maxp);
extern void vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj,
dmu_tx_t *tx);
extern uint64_t vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx);
extern void vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx);
extern void vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx);
extern void vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset,
uint64_t size);
extern void spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev,
uint64_t offset, uint64_t size, dmu_tx_t *tx);
extern boolean_t vdev_replace_in_progress(vdev_t *vdev);
extern void vdev_hold(vdev_t *);
extern void vdev_rele(vdev_t *);
extern int vdev_metaslab_init(vdev_t *vd, uint64_t txg);
extern void vdev_metaslab_fini(vdev_t *vd);
extern void vdev_metaslab_set_size(vdev_t *);
extern void vdev_expand(vdev_t *vd, uint64_t txg);
extern void vdev_split(vdev_t *vd);
extern void vdev_deadman(vdev_t *vd, char *tag);
typedef void vdev_xlate_func_t(void *arg, range_seg64_t *physical_rs);
extern boolean_t vdev_xlate_is_empty(range_seg64_t *rs);
extern void vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs);
extern void vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg);
extern void vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx);
extern metaslab_group_t *vdev_get_mg(vdev_t *vd, metaslab_class_t *mc);
extern void vdev_get_stats(vdev_t *vd, vdev_stat_t *vs);
extern void vdev_clear_stats(vdev_t *vd);
extern void vdev_stat_update(zio_t *zio, uint64_t psize);
extern void vdev_scan_stat_init(vdev_t *vd);
extern void vdev_propagate_state(vdev_t *vd);
extern void vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state,
vdev_aux_t aux);
extern boolean_t vdev_children_are_offline(vdev_t *vd);
extern void vdev_space_update(vdev_t *vd,
int64_t alloc_delta, int64_t defer_delta, int64_t space_delta);
extern int64_t vdev_deflated_space(vdev_t *vd, int64_t space);
extern uint64_t vdev_psize_to_asize(vdev_t *vd, uint64_t psize);
/*
* Return the amount of space allocated for a gang block header.
*/
static inline uint64_t
vdev_gang_header_asize(vdev_t *vd)
{
return (vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE));
}
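/*
 * Illustrative note (not part of the original header): the gang header
 * itself is one 512-byte minimum-sized block (SPA_GANGBLOCKSIZE), so on a
 * non-raidz top-level vdev with ashift=12 the returned asize would
 * typically round up to 4 KiB.
 */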
extern int vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux);
extern int vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux);
extern int vdev_online(spa_t *spa, uint64_t guid, uint64_t flags,
vdev_state_t *);
extern int vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags);
extern void vdev_clear(spa_t *spa, vdev_t *vd);
extern boolean_t vdev_is_dead(vdev_t *vd);
extern boolean_t vdev_readable(vdev_t *vd);
extern boolean_t vdev_writeable(vdev_t *vd);
extern boolean_t vdev_allocatable(vdev_t *vd);
extern boolean_t vdev_accessible(vdev_t *vd, zio_t *zio);
extern boolean_t vdev_is_spacemap_addressable(vdev_t *vd);
extern void vdev_cache_init(vdev_t *vd);
extern void vdev_cache_fini(vdev_t *vd);
extern boolean_t vdev_cache_read(zio_t *zio);
extern void vdev_cache_write(zio_t *zio);
extern void vdev_cache_purge(vdev_t *vd);
extern void vdev_queue_init(vdev_t *vd);
extern void vdev_queue_fini(vdev_t *vd);
extern zio_t *vdev_queue_io(zio_t *zio);
extern void vdev_queue_io_done(zio_t *zio);
extern void vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority);
extern int vdev_queue_length(vdev_t *vd);
extern uint64_t vdev_queue_last_offset(vdev_t *vd);
extern void vdev_config_dirty(vdev_t *vd);
extern void vdev_config_clean(vdev_t *vd);
extern int vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg);
extern void vdev_state_dirty(vdev_t *vd);
extern void vdev_state_clean(vdev_t *vd);
extern void vdev_defer_resilver(vdev_t *vd);
extern boolean_t vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx);
typedef enum vdev_config_flag {
VDEV_CONFIG_SPARE = 1 << 0,
VDEV_CONFIG_L2CACHE = 1 << 1,
VDEV_CONFIG_REMOVING = 1 << 2,
VDEV_CONFIG_MOS = 1 << 3,
VDEV_CONFIG_MISSING = 1 << 4
} vdev_config_flag_t;
extern void vdev_top_config_generate(spa_t *spa, nvlist_t *config);
extern nvlist_t *vdev_config_generate(spa_t *spa, vdev_t *vd,
boolean_t getstats, vdev_config_flag_t flags);
/*
* Label routines
*/
struct uberblock;
extern uint64_t vdev_label_offset(uint64_t psize, int l, uint64_t offset);
extern int vdev_label_number(uint64_t psize, uint64_t offset);
extern nvlist_t *vdev_label_read_config(vdev_t *vd, uint64_t txg);
extern void vdev_uberblock_load(vdev_t *, struct uberblock *, nvlist_t **);
extern void vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv);
extern void vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t
offset, uint64_t size, zio_done_func_t *done, void *priv, int flags);
extern int vdev_label_read_bootenv(vdev_t *, nvlist_t *);
extern int vdev_label_write_bootenv(vdev_t *, nvlist_t *);
typedef enum {
VDEV_LABEL_CREATE, /* create/add a new device */
VDEV_LABEL_REPLACE, /* replace an existing device */
VDEV_LABEL_SPARE, /* add a new hot spare */
VDEV_LABEL_REMOVE, /* remove an existing device */
VDEV_LABEL_L2CACHE, /* add an L2ARC cache device */
VDEV_LABEL_SPLIT /* generating new label for split-off dev */
} vdev_labeltype_t;
extern int vdev_label_init(vdev_t *vd, uint64_t txg, vdev_labeltype_t reason);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_H */
diff --git a/sys/contrib/openzfs/include/sys/zfs_context.h b/sys/contrib/openzfs/include/sys/zfs_context.h
index 4d67e652ab62..80931f98eb97 100644
--- a/sys/contrib/openzfs/include/sys/zfs_context.h
+++ b/sys/contrib/openzfs/include/sys/zfs_context.h
@@ -1,785 +1,783 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
*/
#ifndef _SYS_ZFS_CONTEXT_H
#define _SYS_ZFS_CONTEXT_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* This code compiles in three different contexts. When __KERNEL__ is defined,
* the code uses "unix-like" kernel interfaces. When _STANDALONE is defined, the
 * code runs in the reduced-capacity environment of the boot loader, which is
 * generally a subset of both POSIX and kernel interfaces (with a few unique
 * interfaces too). When neither is defined, it's in a userland POSIX or
* similar environment.
*/
#if defined(__KERNEL__) || defined(_STANDALONE)
-#include <sys/note.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/sysmacros.h>
#include <sys/vmsystm.h>
#include <sys/condvar.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <sys/param.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/random.h>
#include <sys/strings.h>
#include <sys/byteorder.h>
#include <sys/list.h>
#include <sys/time.h>
#include <sys/zone.h>
#include <sys/kstat.h>
#include <sys/zfs_debug.h>
#include <sys/sysevent.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/zfs_delay.h>
#include <sys/sunddi.h>
#include <sys/ctype.h>
#include <sys/disp.h>
#include <sys/trace.h>
#include <sys/procfs_list.h>
#include <sys/mod.h>
#include <sys/uio_impl.h>
#include <sys/zfs_context_os.h>
#else /* _KERNEL || _STANDALONE */
#define _SYS_MUTEX_H
#define _SYS_RWLOCK_H
#define _SYS_CONDVAR_H
#define _SYS_VNODE_H
#define _SYS_VFS_H
#define _SYS_SUNDDI_H
#define _SYS_CALLB_H
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdarg.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <strings.h>
#include <pthread.h>
#include <setjmp.h>
#include <assert.h>
#include <umem.h>
#include <limits.h>
#include <atomic.h>
#include <dirent.h>
#include <time.h>
#include <ctype.h>
#include <signal.h>
#include <sys/mman.h>
-#include <sys/note.h>
#include <sys/types.h>
#include <sys/cred.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/byteorder.h>
#include <sys/list.h>
#include <sys/mod.h>
#include <sys/uio.h>
#include <sys/zfs_debug.h>
#include <sys/kstat.h>
#include <sys/u8_textprep.h>
#include <sys/sysevent.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sunddi.h>
#include <sys/debug.h>
#include <sys/utsname.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_context_os.h>
/*
* Stack
*/
#define noinline __attribute__((noinline))
#define likely(x) __builtin_expect((x), 1)
#define unlikely(x) __builtin_expect((x), 0)
/*
* Debugging
*/
/*
* Note that we are not using the debugging levels.
*/
#define CE_CONT 0 /* continuation */
#define CE_NOTE 1 /* notice */
#define CE_WARN 2 /* warning */
#define CE_PANIC 3 /* panic */
#define CE_IGNORE 4 /* print nothing */
/*
* ZFS debugging
*/
extern void dprintf_setup(int *argc, char **argv);
extern void cmn_err(int, const char *, ...);
extern void vcmn_err(int, const char *, va_list);
extern void panic(const char *, ...) __NORETURN;
extern void vpanic(const char *, va_list) __NORETURN;
#define fm_panic panic
/*
* DTrace SDT probes have different signatures in userland than they do in
* the kernel. If they're being used in kernel code, re-define them out of
* existence for their counterparts in libzpool.
*
* Here's an example of how to use the set-error probes in userland:
* zfs$target:::set-error /arg0 == EBUSY/ {stack();}
*
* Here's an example of how to use DTRACE_PROBE probes in userland:
* If there is a probe declared as follows:
* DTRACE_PROBE2(zfs__probe_name, uint64_t, blkid, dnode_t *, dn);
* Then you can use it as follows:
* zfs$target:::probe2 /copyinstr(arg0) == "zfs__probe_name"/
* {printf("%u %p\n", arg1, arg2);}
*/
#ifdef DTRACE_PROBE
#undef DTRACE_PROBE
#endif /* DTRACE_PROBE */
#define DTRACE_PROBE(a)
#ifdef DTRACE_PROBE1
#undef DTRACE_PROBE1
#endif /* DTRACE_PROBE1 */
#define DTRACE_PROBE1(a, b, c)
#ifdef DTRACE_PROBE2
#undef DTRACE_PROBE2
#endif /* DTRACE_PROBE2 */
#define DTRACE_PROBE2(a, b, c, d, e)
#ifdef DTRACE_PROBE3
#undef DTRACE_PROBE3
#endif /* DTRACE_PROBE3 */
#define DTRACE_PROBE3(a, b, c, d, e, f, g)
#ifdef DTRACE_PROBE4
#undef DTRACE_PROBE4
#endif /* DTRACE_PROBE4 */
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i)
/*
* Tunables.
*/
typedef struct zfs_kernel_param {
const char *name; /* unused stub */
} zfs_kernel_param_t;
#define ZFS_MODULE_PARAM(scope_prefix, name_prefix, name, type, perm, desc)
#define ZFS_MODULE_PARAM_ARGS void
#define ZFS_MODULE_PARAM_CALL(scope_prefix, name_prefix, name, setfunc, \
getfunc, perm, desc)
/*
* Threads.
*/
typedef pthread_t kthread_t;
#define TS_RUN 0x00000002
#define TS_JOINABLE 0x00000004
#define curthread ((void *)(uintptr_t)pthread_self())
#define kpreempt(x) yield()
#define getcomm() "unknown"
#define thread_create_named(name, stk, stksize, func, arg, len, \
pp, state, pri) \
zk_thread_create(func, arg, stksize, state)
#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
zk_thread_create(func, arg, stksize, state)
#define thread_exit() pthread_exit(NULL)
#define thread_join(t) pthread_join((pthread_t)(t), NULL)
#define newproc(f, a, cid, pri, ctp, pid) (ENOSYS)
/* in libzpool, p0 exists only to have its address taken */
typedef struct proc {
uintptr_t this_is_never_used_dont_dereference_it;
} proc_t;
extern struct proc p0;
#define curproc (&p0)
#define PS_NONE -1
extern kthread_t *zk_thread_create(void (*func)(void *), void *arg,
size_t stksize, int state);
#define issig(why) (FALSE)
#define ISSIG(thr, why) (FALSE)
#define kpreempt_disable() ((void)0)
#define kpreempt_enable() ((void)0)
#define cond_resched() sched_yield()
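/*
 * Illustrative sketch (hypothetical names): spawning a worker through the
 * kernel-style interface.  Only func, arg, the stack size and the state
 * survive the macro above; the stack, length, process and priority
 * arguments are dropped.  The worker routine itself ends with thread_exit().
 */
static inline kthread_t *
example_spawn_worker(void (*func)(void *), void *arg)
{
	/* Expands to zk_thread_create(func, arg, 0, TS_RUN). */
	return (thread_create(NULL, 0, func, arg, 0, curproc, TS_RUN, 0));
}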
/*
* Mutexes
*/
typedef struct kmutex {
pthread_mutex_t m_lock;
pthread_t m_owner;
} kmutex_t;
#define MUTEX_DEFAULT 0
#define MUTEX_NOLOCKDEP MUTEX_DEFAULT
#define MUTEX_HELD(mp) pthread_equal((mp)->m_owner, pthread_self())
#define MUTEX_NOT_HELD(mp) !MUTEX_HELD(mp)
extern void mutex_init(kmutex_t *mp, char *name, int type, void *cookie);
extern void mutex_destroy(kmutex_t *mp);
extern void mutex_enter(kmutex_t *mp);
extern void mutex_exit(kmutex_t *mp);
extern int mutex_tryenter(kmutex_t *mp);
#define NESTED_SINGLE 1
#define mutex_enter_nested(mp, class) mutex_enter(mp)
/*
* RW locks
*/
typedef struct krwlock {
pthread_rwlock_t rw_lock;
pthread_t rw_owner;
uint_t rw_readers;
} krwlock_t;
typedef int krw_t;
#define RW_READER 0
#define RW_WRITER 1
#define RW_DEFAULT RW_READER
#define RW_NOLOCKDEP RW_READER
#define RW_READ_HELD(rw) ((rw)->rw_readers > 0)
#define RW_WRITE_HELD(rw) pthread_equal((rw)->rw_owner, pthread_self())
#define RW_LOCK_HELD(rw) (RW_READ_HELD(rw) || RW_WRITE_HELD(rw))
extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg);
extern void rw_destroy(krwlock_t *rwlp);
extern void rw_enter(krwlock_t *rwlp, krw_t rw);
extern int rw_tryenter(krwlock_t *rwlp, krw_t rw);
extern int rw_tryupgrade(krwlock_t *rwlp);
extern void rw_exit(krwlock_t *rwlp);
#define rw_downgrade(rwlp) do { } while (0)
/*
* Credentials
*/
extern uid_t crgetuid(cred_t *cr);
extern uid_t crgetruid(cred_t *cr);
extern gid_t crgetgid(cred_t *cr);
extern int crgetngroups(cred_t *cr);
extern gid_t *crgetgroups(cred_t *cr);
/*
* Condition variables
*/
typedef pthread_cond_t kcondvar_t;
#define CV_DEFAULT 0
#define CALLOUT_FLAG_ABSOLUTE 0x2
extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg);
extern void cv_destroy(kcondvar_t *cv);
extern void cv_wait(kcondvar_t *cv, kmutex_t *mp);
extern int cv_wait_sig(kcondvar_t *cv, kmutex_t *mp);
extern int cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime);
extern int cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
hrtime_t res, int flag);
extern void cv_signal(kcondvar_t *cv);
extern void cv_broadcast(kcondvar_t *cv);
#define cv_timedwait_io(cv, mp, at) cv_timedwait(cv, mp, at)
#define cv_timedwait_idle(cv, mp, at) cv_timedwait(cv, mp, at)
#define cv_timedwait_sig(cv, mp, at) cv_timedwait(cv, mp, at)
#define cv_wait_io(cv, mp) cv_wait(cv, mp)
#define cv_wait_idle(cv, mp) cv_wait(cv, mp)
#define cv_wait_io_sig(cv, mp) cv_wait_sig(cv, mp)
#define cv_timedwait_sig_hires(cv, mp, t, r, f) \
cv_timedwait_hires(cv, mp, t, r, f)
#define cv_timedwait_idle_hires(cv, mp, t, r, f) \
cv_timedwait_hires(cv, mp, t, r, f)
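/*
 * Illustrative sketch (hypothetical type and field names) of the canonical
 * wait/signal pattern on top of the emulated primitives above; the lock and
 * condition variable are assumed to have been set up with mutex_init() and
 * cv_init(..., CV_DEFAULT, ...).
 */
typedef struct example_waiter {
	kmutex_t	ew_lock;
	kcondvar_t	ew_cv;
	int		ew_ready;
} example_waiter_t;

static inline void
example_wait_until_ready(example_waiter_t *ew)
{
	mutex_enter(&ew->ew_lock);
	while (!ew->ew_ready)
		cv_wait(&ew->ew_cv, &ew->ew_lock); /* drops and retakes ew_lock */
	mutex_exit(&ew->ew_lock);
}

static inline void
example_mark_ready(example_waiter_t *ew)
{
	mutex_enter(&ew->ew_lock);
	ew->ew_ready = 1;
	cv_broadcast(&ew->ew_cv);	/* wake every waiter */
	mutex_exit(&ew->ew_lock);
}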
/*
* Thread-specific data
*/
#define tsd_get(k) pthread_getspecific(k)
#define tsd_set(k, v) pthread_setspecific(k, v)
#define tsd_create(kp, d) pthread_key_create((pthread_key_t *)kp, d)
#define tsd_destroy(kp) /* nothing */
#ifdef __FreeBSD__
typedef off_t loff_t;
#endif
/*
* kstat creation, installation and deletion
*/
extern kstat_t *kstat_create(const char *, int,
const char *, const char *, uchar_t, ulong_t, uchar_t);
extern void kstat_install(kstat_t *);
extern void kstat_delete(kstat_t *);
extern void kstat_set_raw_ops(kstat_t *ksp,
int (*headers)(char *buf, size_t size),
int (*data)(char *buf, size_t size, void *data),
void *(*addr)(kstat_t *ksp, loff_t index));
/*
* procfs list manipulation
*/
typedef struct procfs_list {
void *pl_private;
kmutex_t pl_lock;
list_t pl_list;
uint64_t pl_next_id;
size_t pl_node_offset;
} procfs_list_t;
#ifndef __cplusplus
struct seq_file { };
void seq_printf(struct seq_file *m, const char *fmt, ...);
typedef struct procfs_list_node {
list_node_t pln_link;
uint64_t pln_id;
} procfs_list_node_t;
void procfs_list_install(const char *module,
const char *submodule,
const char *name,
mode_t mode,
procfs_list_t *procfs_list,
int (*show)(struct seq_file *f, void *p),
int (*show_header)(struct seq_file *f),
int (*clear)(procfs_list_t *procfs_list),
size_t procfs_list_node_off);
void procfs_list_uninstall(procfs_list_t *procfs_list);
void procfs_list_destroy(procfs_list_t *procfs_list);
void procfs_list_add(procfs_list_t *procfs_list, void *p);
#endif
/*
* Kernel memory
*/
#define KM_SLEEP UMEM_NOFAIL
#define KM_PUSHPAGE KM_SLEEP
#define KM_NOSLEEP UMEM_DEFAULT
#define KM_NORMALPRI 0 /* not needed with UMEM_DEFAULT */
#define KMC_NODEBUG UMC_NODEBUG
#define KMC_KVMEM 0x0
#define kmem_alloc(_s, _f) umem_alloc(_s, _f)
#define kmem_zalloc(_s, _f) umem_zalloc(_s, _f)
#define kmem_free(_b, _s) umem_free(_b, _s)
#define vmem_alloc(_s, _f) kmem_alloc(_s, _f)
#define vmem_zalloc(_s, _f) kmem_zalloc(_s, _f)
#define vmem_free(_b, _s) kmem_free(_b, _s)
#define kmem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i) \
umem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i)
#define kmem_cache_destroy(_c) umem_cache_destroy(_c)
#define kmem_cache_alloc(_c, _f) umem_cache_alloc(_c, _f)
#define kmem_cache_free(_c, _b) umem_cache_free(_c, _b)
#define kmem_debugging() 0
#define kmem_cache_reap_now(_c) umem_cache_reap_now(_c)
#define kmem_cache_set_move(_c, _cb) /* nothing */
#define POINTER_INVALIDATE(_pp) /* nothing */
#define POINTER_IS_VALID(_p) 0
typedef umem_cache_t kmem_cache_t;
typedef enum kmem_cbrc {
KMEM_CBRC_YES,
KMEM_CBRC_NO,
KMEM_CBRC_LATER,
KMEM_CBRC_DONT_NEED,
KMEM_CBRC_DONT_KNOW
} kmem_cbrc_t;
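/*
 * Illustrative sketch (hypothetical function names): kernel-style
 * allocations map straight onto libumem, so the usual alloc/free pairing
 * is unchanged.  KM_SLEEP becomes UMEM_NOFAIL, so the result never needs
 * a NULL check; the original size must be passed back to kmem_free().
 */
static inline void *
example_alloc_buf(size_t size)
{
	return (kmem_zalloc(size, KM_SLEEP));
}

static inline void
example_free_buf(void *buf, size_t size)
{
	kmem_free(buf, size);
}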
/*
* Task queues
*/
#define TASKQ_NAMELEN 31
typedef uintptr_t taskqid_t;
typedef void (task_func_t)(void *);
typedef struct taskq_ent {
struct taskq_ent *tqent_next;
struct taskq_ent *tqent_prev;
task_func_t *tqent_func;
void *tqent_arg;
uintptr_t tqent_flags;
} taskq_ent_t;
typedef struct taskq {
char tq_name[TASKQ_NAMELEN + 1];
kmutex_t tq_lock;
krwlock_t tq_threadlock;
kcondvar_t tq_dispatch_cv;
kcondvar_t tq_wait_cv;
kthread_t **tq_threadlist;
int tq_flags;
int tq_active;
int tq_nthreads;
int tq_nalloc;
int tq_minalloc;
int tq_maxalloc;
kcondvar_t tq_maxalloc_cv;
int tq_maxalloc_wait;
taskq_ent_t *tq_freelist;
taskq_ent_t tq_task;
} taskq_t;
#define TQENT_FLAG_PREALLOC 0x1 /* taskq_dispatch_ent used */
#define TASKQ_PREPOPULATE 0x0001
#define TASKQ_CPR_SAFE 0x0002 /* Use CPR safe protocol */
#define TASKQ_DYNAMIC 0x0004 /* Use dynamic thread scheduling */
#define TASKQ_THREADS_CPU_PCT 0x0008 /* Scale # threads by # cpus */
#define TASKQ_DC_BATCH 0x0010 /* Mark threads as batch */
#define TQ_SLEEP KM_SLEEP /* Can block for memory */
#define TQ_NOSLEEP KM_NOSLEEP /* cannot block for memory; may fail */
#define TQ_NOQUEUE 0x02 /* Do not enqueue if can't dispatch */
#define TQ_FRONT 0x08 /* Queue in front */
#define TASKQID_INVALID ((taskqid_t)0)
extern taskq_t *system_taskq;
extern taskq_t *system_delay_taskq;
extern taskq_t *taskq_create(const char *, int, pri_t, int, int, uint_t);
#define taskq_create_proc(a, b, c, d, e, p, f) \
(taskq_create(a, b, c, d, e, f))
#define taskq_create_sysdc(a, b, d, e, p, dc, f) \
(taskq_create(a, b, maxclsyspri, d, e, f))
extern taskqid_t taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
extern taskqid_t taskq_dispatch_delay(taskq_t *, task_func_t, void *, uint_t,
clock_t);
extern void taskq_dispatch_ent(taskq_t *, task_func_t, void *, uint_t,
taskq_ent_t *);
extern int taskq_empty_ent(taskq_ent_t *);
extern void taskq_init_ent(taskq_ent_t *);
extern void taskq_destroy(taskq_t *);
extern void taskq_wait(taskq_t *);
extern void taskq_wait_id(taskq_t *, taskqid_t);
extern void taskq_wait_outstanding(taskq_t *, taskqid_t);
extern int taskq_member(taskq_t *, kthread_t *);
extern taskq_t *taskq_of_curthread(void);
extern int taskq_cancel_id(taskq_t *, taskqid_t);
extern void system_taskq_init(void);
extern void system_taskq_fini(void);
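/*
 * Illustrative sketch (hypothetical queue name and callback): dispatching
 * work to a private task queue with the interfaces declared above.
 */
static inline void
example_run_task(task_func_t *func, void *arg)
{
	taskq_t *tq;

	tq = taskq_create("example_tq", 4, 0, 4, INT_MAX, TASKQ_PREPOPULATE);
	if (taskq_dispatch(tq, func, arg, TQ_NOSLEEP) == TASKQID_INVALID)
		func(arg);		/* fall back to running it inline */
	taskq_wait(tq);			/* drain outstanding work */
	taskq_destroy(tq);
}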
#define XVA_MAPSIZE 3
#define XVA_MAGIC 0x78766174
extern char *vn_dumpdir;
#define AV_SCANSTAMP_SZ 32 /* length of anti-virus scanstamp */
typedef struct xoptattr {
inode_timespec_t xoa_createtime; /* Create time of file */
uint8_t xoa_archive;
uint8_t xoa_system;
uint8_t xoa_readonly;
uint8_t xoa_hidden;
uint8_t xoa_nounlink;
uint8_t xoa_immutable;
uint8_t xoa_appendonly;
uint8_t xoa_nodump;
uint8_t xoa_settable;
uint8_t xoa_opaque;
uint8_t xoa_av_quarantined;
uint8_t xoa_av_modified;
uint8_t xoa_av_scanstamp[AV_SCANSTAMP_SZ];
uint8_t xoa_reparse;
uint8_t xoa_offline;
uint8_t xoa_sparse;
} xoptattr_t;
typedef struct vattr {
uint_t va_mask; /* bit-mask of attributes */
u_offset_t va_size; /* file size in bytes */
} vattr_t;
typedef struct xvattr {
vattr_t xva_vattr; /* Embedded vattr structure */
uint32_t xva_magic; /* Magic Number */
uint32_t xva_mapsize; /* Size of attr bitmap (32-bit words) */
uint32_t *xva_rtnattrmapp; /* Ptr to xva_rtnattrmap[] */
uint32_t xva_reqattrmap[XVA_MAPSIZE]; /* Requested attrs */
uint32_t xva_rtnattrmap[XVA_MAPSIZE]; /* Returned attrs */
xoptattr_t xva_xoptattrs; /* Optional attributes */
} xvattr_t;
typedef struct vsecattr {
uint_t vsa_mask; /* See below */
int vsa_aclcnt; /* ACL entry count */
void *vsa_aclentp; /* pointer to ACL entries */
int vsa_dfaclcnt; /* default ACL entry count */
void *vsa_dfaclentp; /* pointer to default ACL entries */
size_t vsa_aclentsz; /* ACE size in bytes of vsa_aclentp */
} vsecattr_t;
#define AT_MODE 0x00002
#define AT_UID 0x00004
#define AT_GID 0x00008
#define AT_FSID 0x00010
#define AT_NODEID 0x00020
#define AT_NLINK 0x00040
#define AT_SIZE 0x00080
#define AT_ATIME 0x00100
#define AT_MTIME 0x00200
#define AT_CTIME 0x00400
#define AT_RDEV 0x00800
#define AT_BLKSIZE 0x01000
#define AT_NBLOCKS 0x02000
#define AT_SEQ 0x08000
#define AT_XVATTR 0x10000
#define CRCREAT 0
#define F_FREESP 11
#define FIGNORECASE 0x80000 /* request case-insensitive lookups */
/*
* Random stuff
*/
#define ddi_get_lbolt() (gethrtime() >> 23)
#define ddi_get_lbolt64() (gethrtime() >> 23)
#define hz 119 /* frequency when using gethrtime() >> 23 for lbolt */
#define ddi_time_before(a, b) (a < b)
#define ddi_time_after(a, b) ddi_time_before(b, a)
#define ddi_time_before_eq(a, b) (!ddi_time_after(a, b))
#define ddi_time_after_eq(a, b) ddi_time_before_eq(b, a)
#define ddi_time_before64(a, b) (a < b)
#define ddi_time_after64(a, b) ddi_time_before64(b, a)
#define ddi_time_before_eq64(a, b) (!ddi_time_after64(a, b))
#define ddi_time_after_eq64(a, b) ddi_time_before_eq64(b, a)
extern void delay(clock_t ticks);
#define SEC_TO_TICK(sec) ((sec) * hz)
#define MSEC_TO_TICK(msec) (howmany((hrtime_t)(msec) * hz, MILLISEC))
#define USEC_TO_TICK(usec) (howmany((hrtime_t)(usec) * hz, MICROSEC))
#define NSEC_TO_TICK(nsec) (howmany((hrtime_t)(nsec) * hz, NANOSEC))
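/*
 * The arithmetic behind the constants above: gethrtime() >> 23 divides
 * nanoseconds by 2^23 = 8388608, so one emulated tick is ~8.389 ms and one
 * second holds 10^9 / 2^23 ~= 119.2 ticks, hence hz = 119.  For example,
 * MSEC_TO_TICK(100) = howmany(100 * 119, 1000) = 12 ticks ~= 100.7 ms.
 */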
#define max_ncpus 64
#define boot_ncpus (sysconf(_SC_NPROCESSORS_ONLN))
/*
* Process priorities as defined by setpriority(2) and getpriority(2).
*/
#define minclsyspri 19
#define maxclsyspri -20
#define defclsyspri 0
#define CPU_SEQID ((uintptr_t)pthread_self() & (max_ncpus - 1))
#define CPU_SEQID_UNSTABLE CPU_SEQID
#define kcred NULL
#define CRED() NULL
#define ptob(x) ((x) * PAGESIZE)
#define NN_DIVISOR_1000 (1U << 0)
#define NN_NUMBUF_SZ (6)
extern uint64_t physmem;
extern const char *random_path;
extern const char *urandom_path;
extern int highbit64(uint64_t i);
extern int lowbit64(uint64_t i);
extern int random_get_bytes(uint8_t *ptr, size_t len);
extern int random_get_pseudo_bytes(uint8_t *ptr, size_t len);
static __inline__ uint32_t
random_in_range(uint32_t range)
{
uint32_t r;
ASSERT(range != 0);
if (range == 1)
return (0);
(void) random_get_pseudo_bytes((uint8_t *)&r, sizeof (r));
return (r % range);
}
extern void kernel_init(int mode);
extern void kernel_fini(void);
extern void random_init(void);
extern void random_fini(void);
struct spa;
extern void show_pool_stats(struct spa *);
extern int set_global_var(char const *arg);
typedef struct callb_cpr {
kmutex_t *cc_lockp;
} callb_cpr_t;
#define CALLB_CPR_INIT(cp, lockp, func, name) { \
(cp)->cc_lockp = lockp; \
}
#define CALLB_CPR_SAFE_BEGIN(cp) { \
ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
}
#define CALLB_CPR_SAFE_END(cp, lockp) { \
ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
}
#define CALLB_CPR_EXIT(cp) { \
ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
mutex_exit((cp)->cc_lockp); \
}
#define zone_dataset_visible(x, y) (1)
#define INGLOBALZONE(z) (1)
extern uint32_t zone_get_hostid(void *zonep);
extern char *kmem_vasprintf(const char *fmt, va_list adx);
extern char *kmem_asprintf(const char *fmt, ...);
#define kmem_strfree(str) kmem_free((str), strlen(str) + 1)
#define kmem_strdup(s) strdup(s)
/*
* Hostname information
*/
extern char hw_serial[]; /* for userland-emulated hostid access */
extern int ddi_strtoul(const char *str, char **nptr, int base,
unsigned long *result);
extern int ddi_strtoull(const char *str, char **nptr, int base,
u_longlong_t *result);
typedef struct utsname utsname_t;
extern utsname_t *utsname(void);
/* ZFS Boot Related stuff. */
struct _buf {
intptr_t _fd;
};
struct bootstat {
uint64_t st_size;
};
typedef struct ace_object {
uid_t a_who;
uint32_t a_access_mask;
uint16_t a_flags;
uint16_t a_type;
uint8_t a_obj_type[16];
uint8_t a_inherit_obj_type[16];
} ace_object_t;
#define ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
#define ACE_ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
#define ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
#define ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
extern int zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr);
extern int zfs_secpolicy_rename_perms(const char *from, const char *to,
cred_t *cr);
extern int zfs_secpolicy_destroy_perms(const char *name, cred_t *cr);
extern int secpolicy_zfs(const cred_t *cr);
extern int secpolicy_zfs_proc(const cred_t *cr, proc_t *proc);
extern zoneid_t getzoneid(void);
/* SID stuff */
typedef struct ksiddomain {
uint_t kd_ref;
uint_t kd_len;
char *kd_name;
} ksiddomain_t;
ksiddomain_t *ksid_lookupdomain(const char *);
void ksiddomain_rele(ksiddomain_t *);
#define DDI_SLEEP KM_SLEEP
#define ddi_log_sysevent(_a, _b, _c, _d, _e, _f, _g) \
sysevent_post_event(_c, _d, _b, "libzpool", _e, _f)
#define zfs_sleep_until(wakeup) \
do { \
hrtime_t delta = wakeup - gethrtime(); \
struct timespec ts; \
ts.tv_sec = delta / NANOSEC; \
ts.tv_nsec = delta % NANOSEC; \
(void) nanosleep(&ts, NULL); \
} while (0)
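/*
 * Usage sketch for the macro above: it takes an absolute gethrtime()
 * deadline, converts the remaining delta to a timespec and hands it to
 * nanosleep(2), e.g.
 *
 *	zfs_sleep_until(gethrtime() + NANOSEC / 10);	sleeps ~100 ms
 */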
typedef int fstrans_cookie_t;
extern fstrans_cookie_t spl_fstrans_mark(void);
extern void spl_fstrans_unmark(fstrans_cookie_t);
extern int __spl_pf_fstrans_check(void);
extern int kmem_cache_reap_active(void);
#define ____cacheline_aligned
/*
* Kernel modules
*/
#define __init
#define __exit
#endif /* _KERNEL || _STANDALONE */
#ifdef __cplusplus
};
#endif
#endif /* _SYS_ZFS_CONTEXT_H */
diff --git a/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi b/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi
index 8c503fecd152..f0845c796342 100644
--- a/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi
+++ b/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi
@@ -1,3636 +1,3191 @@
-<abi-corpus path='libnvpair.so' architecture='elf-amd-x86_64' soname='libnvpair.so.3'>
+<abi-corpus architecture='elf-amd-x86_64' soname='libnvpair.so.3'>
<elf-needed>
<dependency name='libc.so.6'/>
</elf-needed>
<elf-function-symbols>
+ <elf-symbol name='_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dump_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_merge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_num_pairs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_pack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_pack_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_remove_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_unpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assertf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nv_alloc_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nv_alloc_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nv_alloc_reset' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_nv_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_nvpair_embedded_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_pairs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_merge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_next_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_nvflag' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_pack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prev_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_print_json' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_dofmt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_doindent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_getdest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_setdest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_setfmt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_setindent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_remove_all' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_remove_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_unpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_xalloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_xdup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_xpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_xunpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_type_is_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_match' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_match_regex' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='libspl_assert_ok' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nv_alloc_nosleep' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nv_fixed_ops' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
- <abi-instr version='1.0' address-size='64' path='libnvpair.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libnvpair' language='LANG_C99'>
- <type-decl name='void' id='type-id-1'/>
- <class-decl name='nvlist_prtctl' size-in-bits='576' is-struct='yes' visibility='default' id='type-id-2'>
+ <abi-instr version='1.0' address-size='64' path='libnvpair.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
+ <type-decl name='int' size-in-bits='32' id='type-id-1'/>
+ <class-decl name='nvpair' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-2'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvprt_fp' type-id='type-id-3' visibility='default'/>
+ <var-decl name='nvp_size' type-id='type-id-3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='nvp_name_sz' type-id='type-id-4' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='48'>
+ <var-decl name='nvp_reserve' type-id='type-id-4' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nvp_value_elem' type-id='type-id-3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='nvp_type' type-id='type-id-5' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='__int32_t' type-id='type-id-1' id='type-id-6'/>
+ <typedef-decl name='int32_t' type-id='type-id-6' id='type-id-3'/>
+ <type-decl name='short int' size-in-bits='16' id='type-id-7'/>
+ <typedef-decl name='__int16_t' type-id='type-id-7' id='type-id-8'/>
+ <typedef-decl name='int16_t' type-id='type-id-8' id='type-id-4'/>
+ <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-9'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-10'>
+ <underlying-type type-id='type-id-9'/>
+ <enumerator name='DATA_TYPE_DONTCARE' value='-1'/>
+ <enumerator name='DATA_TYPE_UNKNOWN' value='0'/>
+ <enumerator name='DATA_TYPE_BOOLEAN' value='1'/>
+ <enumerator name='DATA_TYPE_BYTE' value='2'/>
+ <enumerator name='DATA_TYPE_INT16' value='3'/>
+ <enumerator name='DATA_TYPE_UINT16' value='4'/>
+ <enumerator name='DATA_TYPE_INT32' value='5'/>
+ <enumerator name='DATA_TYPE_UINT32' value='6'/>
+ <enumerator name='DATA_TYPE_INT64' value='7'/>
+ <enumerator name='DATA_TYPE_UINT64' value='8'/>
+ <enumerator name='DATA_TYPE_STRING' value='9'/>
+ <enumerator name='DATA_TYPE_BYTE_ARRAY' value='10'/>
+ <enumerator name='DATA_TYPE_INT16_ARRAY' value='11'/>
+ <enumerator name='DATA_TYPE_UINT16_ARRAY' value='12'/>
+ <enumerator name='DATA_TYPE_INT32_ARRAY' value='13'/>
+ <enumerator name='DATA_TYPE_UINT32_ARRAY' value='14'/>
+ <enumerator name='DATA_TYPE_INT64_ARRAY' value='15'/>
+ <enumerator name='DATA_TYPE_UINT64_ARRAY' value='16'/>
+ <enumerator name='DATA_TYPE_STRING_ARRAY' value='17'/>
+ <enumerator name='DATA_TYPE_HRTIME' value='18'/>
+ <enumerator name='DATA_TYPE_NVLIST' value='19'/>
+ <enumerator name='DATA_TYPE_NVLIST_ARRAY' value='20'/>
+ <enumerator name='DATA_TYPE_BOOLEAN_VALUE' value='21'/>
+ <enumerator name='DATA_TYPE_INT8' value='22'/>
+ <enumerator name='DATA_TYPE_UINT8' value='23'/>
+ <enumerator name='DATA_TYPE_BOOLEAN_ARRAY' value='24'/>
+ <enumerator name='DATA_TYPE_INT8_ARRAY' value='25'/>
+ <enumerator name='DATA_TYPE_UINT8_ARRAY' value='26'/>
+ <enumerator name='DATA_TYPE_DOUBLE' value='27'/>
+ </enum-decl>
+ <typedef-decl name='data_type_t' type-id='type-id-10' id='type-id-5'/>
+ <typedef-decl name='nvpair_t' type-id='type-id-2' id='type-id-11'/>
+ <pointer-type-def type-id='type-id-11' size-in-bits='64' id='type-id-12'/>
+ <type-decl name='char' size-in-bits='8' id='type-id-13'/>
+ <pointer-type-def type-id='type-id-13' size-in-bits='64' id='type-id-14'/>
+ <pointer-type-def type-id='type-id-14' size-in-bits='64' id='type-id-15'/>
+ <function-decl name='nvpair_value_match' mangled-name='nvpair_value_match' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_match'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-1' name='ai'/>
+ <parameter type-id='type-id-14' name='value'/>
+ <parameter type-id='type-id-15' name='ep'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-16'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='buffer' type-id='type-id-17' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='allocated' type-id='type-id-18' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='used' type-id='type-id-18' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='syntax' type-id='type-id-19' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='fastmap' type-id='type-id-14' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='translate' type-id='type-id-17' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='re_nsub' type-id='type-id-20' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='31'>
+ <var-decl name='can_be_null' type-id='type-id-21' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='29'>
+ <var-decl name='regs_allocated' type-id='type-id-21' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='28'>
+ <var-decl name='fastmap_accurate' type-id='type-id-21' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='27'>
+ <var-decl name='no_sub' type-id='type-id-21' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='26'>
+ <var-decl name='not_bol' type-id='type-id-21' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='25'>
+ <var-decl name='not_eol' type-id='type-id-21' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='24'>
+ <var-decl name='newline_anchor' type-id='type-id-21' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <type-decl name='unsigned char' size-in-bits='8' id='type-id-22'/>
+ <pointer-type-def type-id='type-id-22' size-in-bits='64' id='type-id-17'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='type-id-18'/>
+ <typedef-decl name='reg_syntax_t' type-id='type-id-18' id='type-id-19'/>
+ <typedef-decl name='size_t' type-id='type-id-18' id='type-id-20'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='type-id-21'/>
+ <typedef-decl name='regex_t' type-id='type-id-16' id='type-id-23'/>
+ <pointer-type-def type-id='type-id-23' size-in-bits='64' id='type-id-24'/>
+ <function-decl name='nvpair_value_match_regex' mangled-name='nvpair_value_match_regex' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_match_regex'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-1' name='ai'/>
+ <parameter type-id='type-id-14' name='value'/>
+ <parameter type-id='type-id-24' name='value_regex'/>
+ <parameter type-id='type-id-15' name='ep'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <type-decl name='void' id='type-id-25'/>
+ <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-26'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nvl_version' type-id='type-id-3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='nvl_nvflag' type-id='type-id-27' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nvl_priv' type-id='type-id-28' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='nvl_flag' type-id='type-id-27' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='nvl_pad' type-id='type-id-3' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='__uint32_t' type-id='type-id-21' id='type-id-29'/>
+ <typedef-decl name='uint32_t' type-id='type-id-29' id='type-id-27'/>
+ <typedef-decl name='__uint64_t' type-id='type-id-18' id='type-id-30'/>
+ <typedef-decl name='uint64_t' type-id='type-id-30' id='type-id-28'/>
+ <typedef-decl name='nvlist_t' type-id='type-id-26' id='type-id-31'/>
+ <pointer-type-def type-id='type-id-31' size-in-bits='64' id='type-id-32'/>
+ <function-decl name='dump_nvlist' mangled-name='dump_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dump_nvlist'>
+ <parameter type-id='type-id-32' name='list'/>
+ <parameter type-id='type-id-1' name='indent'/>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <class-decl name='nvlist_prtctl' size-in-bits='576' is-struct='yes' visibility='default' id='type-id-33'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nvprt_fp' type-id='type-id-34' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvprt_indent_mode' type-id='type-id-4' visibility='default'/>
+ <var-decl name='nvprt_indent_mode' type-id='type-id-35' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='nvprt_indent' type-id='type-id-5' visibility='default'/>
+ <var-decl name='nvprt_indent' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='nvprt_indentinc' type-id='type-id-5' visibility='default'/>
+ <var-decl name='nvprt_indentinc' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='nvprt_nmfmt' type-id='type-id-6' visibility='default'/>
+ <var-decl name='nvprt_nmfmt' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='nvprt_eomfmt' type-id='type-id-6' visibility='default'/>
+ <var-decl name='nvprt_eomfmt' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='nvprt_btwnarrfmt' type-id='type-id-6' visibility='default'/>
+ <var-decl name='nvprt_btwnarrfmt' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='nvprt_btwnarrfmt_nl' type-id='type-id-5' visibility='default'/>
+ <var-decl name='nvprt_btwnarrfmt_nl' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='nvprt_dfltops' type-id='type-id-7' visibility='default'/>
+ <var-decl name='nvprt_dfltops' type-id='type-id-37' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='nvprt_custops' type-id='type-id-7' visibility='default'/>
+ <var-decl name='nvprt_custops' type-id='type-id-37' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-8'>
+ <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-38'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_flags' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_flags' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_IO_read_ptr' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_read_ptr' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_IO_read_end' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_read_end' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='_IO_read_base' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_read_base' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='_IO_write_base' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_write_base' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='_IO_write_ptr' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_write_ptr' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='_IO_write_end' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_write_end' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='_IO_buf_base' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_buf_base' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='_IO_buf_end' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_buf_end' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='_IO_save_base' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_save_base' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='_IO_backup_base' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_backup_base' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='_IO_save_end' type-id='type-id-9' visibility='default'/>
+ <var-decl name='_IO_save_end' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='_markers' type-id='type-id-10' visibility='default'/>
+ <var-decl name='_markers' type-id='type-id-39' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='_chain' type-id='type-id-11' visibility='default'/>
+ <var-decl name='_chain' type-id='type-id-40' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='_fileno' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_fileno' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='_flags2' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_flags2' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='_old_offset' type-id='type-id-12' visibility='default'/>
+ <var-decl name='_old_offset' type-id='type-id-41' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='_cur_column' type-id='type-id-13' visibility='default'/>
+ <var-decl name='_cur_column' type-id='type-id-42' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1040'>
- <var-decl name='_vtable_offset' type-id='type-id-14' visibility='default'/>
+ <var-decl name='_vtable_offset' type-id='type-id-43' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1048'>
- <var-decl name='_shortbuf' type-id='type-id-15' visibility='default'/>
+ <var-decl name='_shortbuf' type-id='type-id-44' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='_offset' type-id='type-id-16' visibility='default'/>
+ <var-decl name='_offset' type-id='type-id-45' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='_codecvt' type-id='type-id-17' visibility='default'/>
+ <var-decl name='__pad1' type-id='type-id-46' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='_wide_data' type-id='type-id-18' visibility='default'/>
+ <var-decl name='__pad2' type-id='type-id-46' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1344'>
- <var-decl name='_freeres_list' type-id='type-id-11' visibility='default'/>
+ <var-decl name='__pad3' type-id='type-id-46' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='_freeres_buf' type-id='type-id-19' visibility='default'/>
+ <var-decl name='__pad4' type-id='type-id-46' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1472'>
<var-decl name='__pad5' type-id='type-id-20' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1536'>
- <var-decl name='_mode' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_mode' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1568'>
- <var-decl name='_unused2' type-id='type-id-21' visibility='default'/>
+ <var-decl name='_unused2' type-id='type-id-47' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-48'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='_next' type-id='type-id-39' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='_sbuf' type-id='type-id-40' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='_pos' type-id='type-id-1' visibility='default'/>
</data-member>
</class-decl>
- <type-decl name='int' size-in-bits='32' id='type-id-5'/>
- <type-decl name='char' size-in-bits='8' id='type-id-22'/>
- <pointer-type-def type-id='type-id-22' size-in-bits='64' id='type-id-9'/>
- <class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-23'/>
- <pointer-type-def type-id='type-id-23' size-in-bits='64' id='type-id-10'/>
- <pointer-type-def type-id='type-id-8' size-in-bits='64' id='type-id-11'/>
- <type-decl name='long int' size-in-bits='64' id='type-id-24'/>
- <typedef-decl name='__off_t' type-id='type-id-24' id='type-id-12'/>
- <type-decl name='unsigned short int' size-in-bits='16' id='type-id-13'/>
- <type-decl name='signed char' size-in-bits='8' id='type-id-14'/>
- <type-decl name='__ARRAY_SIZE_TYPE__' size-in-bits='64' id='type-id-25'/>
+ <pointer-type-def type-id='type-id-48' size-in-bits='64' id='type-id-39'/>
+ <pointer-type-def type-id='type-id-38' size-in-bits='64' id='type-id-40'/>
+ <type-decl name='long int' size-in-bits='64' id='type-id-49'/>
+ <typedef-decl name='__off_t' type-id='type-id-49' id='type-id-41'/>
+ <type-decl name='unsigned short int' size-in-bits='16' id='type-id-42'/>
+ <type-decl name='signed char' size-in-bits='8' id='type-id-43'/>
- <array-type-def dimensions='1' type-id='type-id-22' size-in-bits='8' id='type-id-15'>
- <subrange length='1' type-id='type-id-25' id='type-id-26'/>
+ <array-type-def dimensions='1' type-id='type-id-13' size-in-bits='8' id='type-id-44'>
+ <subrange length='1' type-id='type-id-18' id='type-id-50'/>
</array-type-def>
- <typedef-decl name='__off64_t' type-id='type-id-24' id='type-id-16'/>
- <class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-27'/>
- <pointer-type-def type-id='type-id-27' size-in-bits='64' id='type-id-17'/>
- <class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-28'/>
- <pointer-type-def type-id='type-id-28' size-in-bits='64' id='type-id-18'/>
- <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-19'/>
- <type-decl name='unsigned long int' size-in-bits='64' id='type-id-29'/>
- <typedef-decl name='size_t' type-id='type-id-29' id='type-id-20'/>
+ <typedef-decl name='__off64_t' type-id='type-id-49' id='type-id-45'/>
+ <pointer-type-def type-id='type-id-25' size-in-bits='64' id='type-id-46'/>
- <array-type-def dimensions='1' type-id='type-id-22' size-in-bits='160' id='type-id-21'>
- <subrange length='20' type-id='type-id-25' id='type-id-30'/>
+ <array-type-def dimensions='1' type-id='type-id-13' size-in-bits='160' id='type-id-47'>
+ <subrange length='20' type-id='type-id-18' id='type-id-51'/>
</array-type-def>
- <typedef-decl name='FILE' type-id='type-id-8' id='type-id-31'/>
- <pointer-type-def type-id='type-id-31' size-in-bits='64' id='type-id-3'/>
- <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-32'/>
- <enum-decl name='nvlist_indent_mode' id='type-id-4'>
- <underlying-type type-id='type-id-32'/>
+ <typedef-decl name='FILE' type-id='type-id-38' id='type-id-52'/>
+ <pointer-type-def type-id='type-id-52' size-in-bits='64' id='type-id-34'/>
+ <enum-decl name='nvlist_indent_mode' id='type-id-35'>
+ <underlying-type type-id='type-id-9'/>
<enumerator name='NVLIST_INDENT_ABS' value='0'/>
<enumerator name='NVLIST_INDENT_TABBED' value='1'/>
</enum-decl>
- <qualified-type-def type-id='type-id-22' const='yes' id='type-id-33'/>
- <pointer-type-def type-id='type-id-33' size-in-bits='64' id='type-id-6'/>
- <class-decl name='nvlist_printops' size-in-bits='3456' is-struct='yes' visibility='default' id='type-id-34'>
+ <qualified-type-def type-id='type-id-13' const='yes' id='type-id-53'/>
+ <pointer-type-def type-id='type-id-53' size-in-bits='64' id='type-id-36'/>
+ <class-decl name='nvlist_printops' size-in-bits='3456' is-struct='yes' visibility='default' id='type-id-54'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='print_boolean' type-id='type-id-35' visibility='default'/>
+ <var-decl name='print_boolean' type-id='type-id-55' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='print_boolean_value' type-id='type-id-36' visibility='default'/>
+ <var-decl name='print_boolean_value' type-id='type-id-56' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='print_byte' type-id='type-id-37' visibility='default'/>
+ <var-decl name='print_byte' type-id='type-id-57' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='print_int8' type-id='type-id-38' visibility='default'/>
+ <var-decl name='print_int8' type-id='type-id-58' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='print_uint8' type-id='type-id-39' visibility='default'/>
+ <var-decl name='print_uint8' type-id='type-id-59' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='print_int16' type-id='type-id-40' visibility='default'/>
+ <var-decl name='print_int16' type-id='type-id-60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='print_uint16' type-id='type-id-41' visibility='default'/>
+ <var-decl name='print_uint16' type-id='type-id-61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='print_int32' type-id='type-id-42' visibility='default'/>
+ <var-decl name='print_int32' type-id='type-id-62' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='print_uint32' type-id='type-id-43' visibility='default'/>
+ <var-decl name='print_uint32' type-id='type-id-63' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='print_int64' type-id='type-id-44' visibility='default'/>
+ <var-decl name='print_int64' type-id='type-id-64' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='print_uint64' type-id='type-id-45' visibility='default'/>
+ <var-decl name='print_uint64' type-id='type-id-65' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='print_double' type-id='type-id-46' visibility='default'/>
+ <var-decl name='print_double' type-id='type-id-66' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1536'>
- <var-decl name='print_string' type-id='type-id-47' visibility='default'/>
+ <var-decl name='print_string' type-id='type-id-67' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1664'>
- <var-decl name='print_hrtime' type-id='type-id-48' visibility='default'/>
+ <var-decl name='print_hrtime' type-id='type-id-68' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1792'>
- <var-decl name='print_nvlist' type-id='type-id-49' visibility='default'/>
+ <var-decl name='print_nvlist' type-id='type-id-69' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1920'>
- <var-decl name='print_boolean_array' type-id='type-id-50' visibility='default'/>
+ <var-decl name='print_boolean_array' type-id='type-id-70' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2048'>
- <var-decl name='print_byte_array' type-id='type-id-51' visibility='default'/>
+ <var-decl name='print_byte_array' type-id='type-id-71' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
- <var-decl name='print_int8_array' type-id='type-id-52' visibility='default'/>
+ <var-decl name='print_int8_array' type-id='type-id-72' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
- <var-decl name='print_uint8_array' type-id='type-id-53' visibility='default'/>
+ <var-decl name='print_uint8_array' type-id='type-id-73' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
- <var-decl name='print_int16_array' type-id='type-id-54' visibility='default'/>
+ <var-decl name='print_int16_array' type-id='type-id-74' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2560'>
- <var-decl name='print_uint16_array' type-id='type-id-55' visibility='default'/>
+ <var-decl name='print_uint16_array' type-id='type-id-75' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2688'>
- <var-decl name='print_int32_array' type-id='type-id-56' visibility='default'/>
+ <var-decl name='print_int32_array' type-id='type-id-76' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2816'>
- <var-decl name='print_uint32_array' type-id='type-id-57' visibility='default'/>
+ <var-decl name='print_uint32_array' type-id='type-id-77' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2944'>
- <var-decl name='print_int64_array' type-id='type-id-58' visibility='default'/>
+ <var-decl name='print_int64_array' type-id='type-id-78' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='3072'>
- <var-decl name='print_uint64_array' type-id='type-id-59' visibility='default'/>
+ <var-decl name='print_uint64_array' type-id='type-id-79' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='3200'>
- <var-decl name='print_string_array' type-id='type-id-60' visibility='default'/>
+ <var-decl name='print_string_array' type-id='type-id-80' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='3328'>
- <var-decl name='print_nvlist_array' type-id='type-id-61' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-35'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-62' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='print_nvlist_array' type-id='type-id-81' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-2' size-in-bits='64' id='type-id-63'/>
- <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-64'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-55'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvl_version' type-id='type-id-65' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='nvl_nvflag' type-id='type-id-66' visibility='default'/>
+ <var-decl name='op' type-id='type-id-82' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvl_priv' type-id='type-id-67' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='nvl_flag' type-id='type-id-66' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='nvl_pad' type-id='type-id-65' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__int32_t' type-id='type-id-5' id='type-id-68'/>
- <typedef-decl name='int32_t' type-id='type-id-68' id='type-id-65'/>
- <type-decl name='unsigned int' size-in-bits='32' id='type-id-69'/>
- <typedef-decl name='__uint32_t' type-id='type-id-69' id='type-id-70'/>
- <typedef-decl name='uint32_t' type-id='type-id-70' id='type-id-66'/>
- <typedef-decl name='__uint64_t' type-id='type-id-29' id='type-id-71'/>
- <typedef-decl name='uint64_t' type-id='type-id-71' id='type-id-67'/>
- <typedef-decl name='nvlist_t' type-id='type-id-64' id='type-id-72'/>
- <pointer-type-def type-id='type-id-72' size-in-bits='64' id='type-id-73'/>
- <pointer-type-def type-id='type-id-74' size-in-bits='64' id='type-id-62'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-36'>
+ <pointer-type-def type-id='type-id-33' size-in-bits='64' id='type-id-83'/>
+ <pointer-type-def type-id='type-id-84' size-in-bits='64' id='type-id-82'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-56'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-75' visibility='default'/>
+ <var-decl name='op' type-id='type-id-85' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-76'>
- <underlying-type type-id='type-id-32'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-86'>
+ <underlying-type type-id='type-id-9'/>
<enumerator name='B_FALSE' value='0'/>
<enumerator name='B_TRUE' value='1'/>
</enum-decl>
- <typedef-decl name='boolean_t' type-id='type-id-76' id='type-id-77'/>
- <pointer-type-def type-id='type-id-78' size-in-bits='64' id='type-id-75'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-37'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
- </data-member>
- </class-decl>
- <type-decl name='unsigned char' size-in-bits='8' id='type-id-80'/>
- <typedef-decl name='uchar_t' type-id='type-id-80' id='type-id-81'/>
- <pointer-type-def type-id='type-id-82' size-in-bits='64' id='type-id-79'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-38'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-83' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__int8_t' type-id='type-id-14' id='type-id-84'/>
- <typedef-decl name='int8_t' type-id='type-id-84' id='type-id-85'/>
- <pointer-type-def type-id='type-id-86' size-in-bits='64' id='type-id-83'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-39'>
+ <typedef-decl name='boolean_t' type-id='type-id-86' id='type-id-87'/>
+ <pointer-type-def type-id='type-id-88' size-in-bits='64' id='type-id-85'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-57'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-87' visibility='default'/>
+ <var-decl name='op' type-id='type-id-89' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__uint8_t' type-id='type-id-80' id='type-id-88'/>
- <typedef-decl name='uint8_t' type-id='type-id-88' id='type-id-89'/>
- <pointer-type-def type-id='type-id-90' size-in-bits='64' id='type-id-87'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-40'>
+ <typedef-decl name='uchar_t' type-id='type-id-22' id='type-id-90'/>
+ <pointer-type-def type-id='type-id-91' size-in-bits='64' id='type-id-89'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-58'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-91' visibility='default'/>
+ <var-decl name='op' type-id='type-id-92' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <type-decl name='short int' size-in-bits='16' id='type-id-92'/>
- <typedef-decl name='__int16_t' type-id='type-id-92' id='type-id-93'/>
- <typedef-decl name='int16_t' type-id='type-id-93' id='type-id-94'/>
- <pointer-type-def type-id='type-id-95' size-in-bits='64' id='type-id-91'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-41'>
+ <typedef-decl name='__int8_t' type-id='type-id-43' id='type-id-93'/>
+ <typedef-decl name='int8_t' type-id='type-id-93' id='type-id-94'/>
+ <pointer-type-def type-id='type-id-95' size-in-bits='64' id='type-id-92'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-59'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-96' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__uint16_t' type-id='type-id-13' id='type-id-97'/>
- <typedef-decl name='uint16_t' type-id='type-id-97' id='type-id-98'/>
+ <typedef-decl name='__uint8_t' type-id='type-id-22' id='type-id-97'/>
+ <typedef-decl name='uint8_t' type-id='type-id-97' id='type-id-98'/>
<pointer-type-def type-id='type-id-99' size-in-bits='64' id='type-id-96'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-42'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-60'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-100' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='type-id-101' size-in-bits='64' id='type-id-100'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-43'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-61'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-102' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-103' size-in-bits='64' id='type-id-102'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-44'>
+ <typedef-decl name='__uint16_t' type-id='type-id-42' id='type-id-103'/>
+ <typedef-decl name='uint16_t' type-id='type-id-103' id='type-id-104'/>
+ <pointer-type-def type-id='type-id-105' size-in-bits='64' id='type-id-102'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-62'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-104' visibility='default'/>
+ <var-decl name='op' type-id='type-id-106' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__int64_t' type-id='type-id-24' id='type-id-105'/>
- <typedef-decl name='int64_t' type-id='type-id-105' id='type-id-106'/>
- <pointer-type-def type-id='type-id-107' size-in-bits='64' id='type-id-104'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-45'>
+ <pointer-type-def type-id='type-id-107' size-in-bits='64' id='type-id-106'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-63'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-108' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='type-id-109' size-in-bits='64' id='type-id-108'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-46'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-64'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-110' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <type-decl name='double' size-in-bits='64' id='type-id-111'/>
- <pointer-type-def type-id='type-id-112' size-in-bits='64' id='type-id-110'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-47'>
+ <typedef-decl name='__int64_t' type-id='type-id-49' id='type-id-111'/>
+ <typedef-decl name='int64_t' type-id='type-id-111' id='type-id-112'/>
+ <pointer-type-def type-id='type-id-113' size-in-bits='64' id='type-id-110'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-65'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-113' visibility='default'/>
+ <var-decl name='op' type-id='type-id-114' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-114' size-in-bits='64' id='type-id-113'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-48'>
+ <pointer-type-def type-id='type-id-115' size-in-bits='64' id='type-id-114'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-66'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-115' visibility='default'/>
+ <var-decl name='op' type-id='type-id-116' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <type-decl name='long long int' size-in-bits='64' id='type-id-116'/>
- <typedef-decl name='hrtime_t' type-id='type-id-116' id='type-id-117'/>
- <pointer-type-def type-id='type-id-118' size-in-bits='64' id='type-id-115'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-49'>
+ <type-decl name='double' size-in-bits='64' id='type-id-117'/>
+ <pointer-type-def type-id='type-id-118' size-in-bits='64' id='type-id-116'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-67'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-119' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='type-id-120' size-in-bits='64' id='type-id-119'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-50'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-68'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-121' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-77' size-in-bits='64' id='type-id-122'/>
- <typedef-decl name='uint_t' type-id='type-id-69' id='type-id-123'/>
+ <type-decl name='long long int' size-in-bits='64' id='type-id-122'/>
+ <typedef-decl name='hrtime_t' type-id='type-id-122' id='type-id-123'/>
<pointer-type-def type-id='type-id-124' size-in-bits='64' id='type-id-121'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-51'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-69'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-125' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-81' size-in-bits='64' id='type-id-126'/>
- <pointer-type-def type-id='type-id-127' size-in-bits='64' id='type-id-125'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-52'>
+ <pointer-type-def type-id='type-id-126' size-in-bits='64' id='type-id-125'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-70'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-128' visibility='default'/>
+ <var-decl name='op' type-id='type-id-127' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-85' size-in-bits='64' id='type-id-129'/>
- <pointer-type-def type-id='type-id-130' size-in-bits='64' id='type-id-128'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-53'>
+ <pointer-type-def type-id='type-id-87' size-in-bits='64' id='type-id-128'/>
+ <typedef-decl name='uint_t' type-id='type-id-21' id='type-id-129'/>
+ <pointer-type-def type-id='type-id-130' size-in-bits='64' id='type-id-127'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-71'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-131' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-89' size-in-bits='64' id='type-id-132'/>
+ <pointer-type-def type-id='type-id-90' size-in-bits='64' id='type-id-132'/>
<pointer-type-def type-id='type-id-133' size-in-bits='64' id='type-id-131'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-54'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-72'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-134' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='type-id-94' size-in-bits='64' id='type-id-135'/>
<pointer-type-def type-id='type-id-136' size-in-bits='64' id='type-id-134'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-55'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-73'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-137' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='type-id-98' size-in-bits='64' id='type-id-138'/>
<pointer-type-def type-id='type-id-139' size-in-bits='64' id='type-id-137'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-56'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-74'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-140' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-65' size-in-bits='64' id='type-id-141'/>
+ <pointer-type-def type-id='type-id-4' size-in-bits='64' id='type-id-141'/>
<pointer-type-def type-id='type-id-142' size-in-bits='64' id='type-id-140'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-57'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-75'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-143' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-66' size-in-bits='64' id='type-id-144'/>
+ <pointer-type-def type-id='type-id-104' size-in-bits='64' id='type-id-144'/>
<pointer-type-def type-id='type-id-145' size-in-bits='64' id='type-id-143'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-58'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-76'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-146' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-106' size-in-bits='64' id='type-id-147'/>
+ <pointer-type-def type-id='type-id-3' size-in-bits='64' id='type-id-147'/>
<pointer-type-def type-id='type-id-148' size-in-bits='64' id='type-id-146'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-59'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-77'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-149' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-67' size-in-bits='64' id='type-id-150'/>
+ <pointer-type-def type-id='type-id-27' size-in-bits='64' id='type-id-150'/>
<pointer-type-def type-id='type-id-151' size-in-bits='64' id='type-id-149'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-60'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-78'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-152' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-9' size-in-bits='64' id='type-id-153'/>
+ <pointer-type-def type-id='type-id-112' size-in-bits='64' id='type-id-153'/>
<pointer-type-def type-id='type-id-154' size-in-bits='64' id='type-id-152'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-61'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-79'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='op' type-id='type-id-155' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-73' size-in-bits='64' id='type-id-156'/>
+ <pointer-type-def type-id='type-id-28' size-in-bits='64' id='type-id-156'/>
<pointer-type-def type-id='type-id-157' size-in-bits='64' id='type-id-155'/>
- <pointer-type-def type-id='type-id-34' size-in-bits='64' id='type-id-7'/>
- <typedef-decl name='nvlist_prtctl_t' type-id='type-id-63' id='type-id-158'/>
- <function-decl name='nvlist_prtctl_setdest' mangled-name='nvlist_prtctl_setdest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setdest'>
- <parameter type-id='type-id-158' name='pctl'/>
- <parameter type-id='type-id-3' name='fp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_getdest' mangled-name='nvlist_prtctl_getdest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_getdest'>
- <parameter type-id='type-id-158' name='pctl'/>
- <return type-id='type-id-3'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_setindent' mangled-name='nvlist_prtctl_setindent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setindent'>
- <parameter type-id='type-id-158' name='pctl'/>
- <parameter type-id='type-id-4' name='mode'/>
- <parameter type-id='type-id-5' name='start'/>
- <parameter type-id='type-id-5' name='inc'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_doindent' mangled-name='nvlist_prtctl_doindent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_doindent'>
- <parameter type-id='type-id-158' name='pctl'/>
- <parameter type-id='type-id-5' name='onemore'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <enum-decl name='nvlist_prtctl_fmt' id='type-id-159'>
- <underlying-type type-id='type-id-32'/>
- <enumerator name='NVLIST_FMT_MEMBER_NAME' value='0'/>
- <enumerator name='NVLIST_FMT_MEMBER_POSTAMBLE' value='1'/>
- <enumerator name='NVLIST_FMT_BTWN_ARRAY' value='2'/>
- </enum-decl>
- <function-decl name='nvlist_prtctl_setfmt' mangled-name='nvlist_prtctl_setfmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setfmt'>
- <parameter type-id='type-id-158' name='pctl'/>
- <parameter type-id='type-id-159' name='which'/>
- <parameter type-id='type-id-6' name='fmt'/>
- <return type-id='type-id-1'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-80'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-158' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <pointer-type-def type-id='type-id-159' size-in-bits='64' id='type-id-158'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-81'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-160' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <pointer-type-def type-id='type-id-32' size-in-bits='64' id='type-id-161'/>
+ <pointer-type-def type-id='type-id-162' size-in-bits='64' id='type-id-160'/>
+ <pointer-type-def type-id='type-id-54' size-in-bits='64' id='type-id-37'/>
+ <typedef-decl name='nvlist_prtctl_t' type-id='type-id-83' id='type-id-163'/>
+ <function-decl name='nvlist_prt' mangled-name='nvlist_prt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prt'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-163' name='pctl'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_prtctl_dofmt' mangled-name='nvlist_prtctl_dofmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_dofmt'>
- <parameter type-id='type-id-158' name='pctl'/>
- <parameter type-id='type-id-159' name='which'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvlist_print' mangled-name='nvlist_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_print'>
+ <parameter type-id='type-id-34' name='fp'/>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-160' size-in-bits='64' id='type-id-161'/>
- <function-decl name='nvlist_prtctlop_boolean' mangled-name='nvlist_prtctlop_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean'>
- <parameter type-id='type-id-158' name='pctl'/>
- <parameter type-id='type-id-161' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvlist_prtctl_free' mangled-name='nvlist_prtctl_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_free'>
+ <parameter type-id='type-id-163' name='pctl'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-162' size-in-bits='64' id='type-id-163'/>
- <function-decl name='nvlist_prtctlop_boolean_value' mangled-name='nvlist_prtctlop_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean_value'>
- <parameter type-id='type-id-158' name='pctl'/>
- <parameter type-id='type-id-163' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvlist_prtctl_alloc' mangled-name='nvlist_prtctl_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_alloc'>
+ <return type-id='type-id-163'/>
</function-decl>
<pointer-type-def type-id='type-id-164' size-in-bits='64' id='type-id-165'/>
- <function-decl name='nvlist_prtctlop_byte' mangled-name='nvlist_prtctlop_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_byte'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_nvlist_array' mangled-name='nvlist_prtctlop_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_nvlist_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-165' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-166' size-in-bits='64' id='type-id-167'/>
- <function-decl name='nvlist_prtctlop_int8' mangled-name='nvlist_prtctlop_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int8'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_string_array' mangled-name='nvlist_prtctlop_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_string_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-167' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-168' size-in-bits='64' id='type-id-169'/>
- <function-decl name='nvlist_prtctlop_uint8' mangled-name='nvlist_prtctlop_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint8'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_uint64_array' mangled-name='nvlist_prtctlop_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint64_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-169' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-170' size-in-bits='64' id='type-id-171'/>
- <function-decl name='nvlist_prtctlop_int16' mangled-name='nvlist_prtctlop_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int16'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_int64_array' mangled-name='nvlist_prtctlop_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int64_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-171' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-172' size-in-bits='64' id='type-id-173'/>
- <function-decl name='nvlist_prtctlop_uint16' mangled-name='nvlist_prtctlop_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint16'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_uint32_array' mangled-name='nvlist_prtctlop_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint32_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-173' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-174' size-in-bits='64' id='type-id-175'/>
- <function-decl name='nvlist_prtctlop_int32' mangled-name='nvlist_prtctlop_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int32'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_int32_array' mangled-name='nvlist_prtctlop_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int32_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-175' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-176' size-in-bits='64' id='type-id-177'/>
- <function-decl name='nvlist_prtctlop_uint32' mangled-name='nvlist_prtctlop_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint32'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_uint16_array' mangled-name='nvlist_prtctlop_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint16_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-177' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-178' size-in-bits='64' id='type-id-179'/>
- <function-decl name='nvlist_prtctlop_int64' mangled-name='nvlist_prtctlop_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int64'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_int16_array' mangled-name='nvlist_prtctlop_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int16_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-179' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-180' size-in-bits='64' id='type-id-181'/>
- <function-decl name='nvlist_prtctlop_uint64' mangled-name='nvlist_prtctlop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint64'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_uint8_array' mangled-name='nvlist_prtctlop_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint8_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-181' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-182' size-in-bits='64' id='type-id-183'/>
- <function-decl name='nvlist_prtctlop_double' mangled-name='nvlist_prtctlop_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_double'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_int8_array' mangled-name='nvlist_prtctlop_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int8_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-183' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-184' size-in-bits='64' id='type-id-185'/>
- <function-decl name='nvlist_prtctlop_string' mangled-name='nvlist_prtctlop_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_string'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_byte_array' mangled-name='nvlist_prtctlop_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_byte_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-185' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-186' size-in-bits='64' id='type-id-187'/>
- <function-decl name='nvlist_prtctlop_hrtime' mangled-name='nvlist_prtctlop_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_hrtime'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_boolean_array' mangled-name='nvlist_prtctlop_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean_array'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-187' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-188' size-in-bits='64' id='type-id-189'/>
<function-decl name='nvlist_prtctlop_nvlist' mangled-name='nvlist_prtctlop_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_nvlist'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-189' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-190' size-in-bits='64' id='type-id-191'/>
- <function-decl name='nvlist_prtctlop_boolean_array' mangled-name='nvlist_prtctlop_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_hrtime' mangled-name='nvlist_prtctlop_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_hrtime'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-191' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-192' size-in-bits='64' id='type-id-193'/>
- <function-decl name='nvlist_prtctlop_byte_array' mangled-name='nvlist_prtctlop_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_byte_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_string' mangled-name='nvlist_prtctlop_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_string'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-193' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-194' size-in-bits='64' id='type-id-195'/>
- <function-decl name='nvlist_prtctlop_int8_array' mangled-name='nvlist_prtctlop_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int8_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_double' mangled-name='nvlist_prtctlop_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_double'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-195' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-196' size-in-bits='64' id='type-id-197'/>
- <function-decl name='nvlist_prtctlop_uint8_array' mangled-name='nvlist_prtctlop_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint8_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_uint64' mangled-name='nvlist_prtctlop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint64'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-197' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-198' size-in-bits='64' id='type-id-199'/>
- <function-decl name='nvlist_prtctlop_int16_array' mangled-name='nvlist_prtctlop_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int16_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_int64' mangled-name='nvlist_prtctlop_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int64'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-199' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-200' size-in-bits='64' id='type-id-201'/>
- <function-decl name='nvlist_prtctlop_uint16_array' mangled-name='nvlist_prtctlop_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint16_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_uint32' mangled-name='nvlist_prtctlop_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint32'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-201' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-202' size-in-bits='64' id='type-id-203'/>
- <function-decl name='nvlist_prtctlop_int32_array' mangled-name='nvlist_prtctlop_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int32_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_int32' mangled-name='nvlist_prtctlop_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int32'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-203' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-204' size-in-bits='64' id='type-id-205'/>
- <function-decl name='nvlist_prtctlop_uint32_array' mangled-name='nvlist_prtctlop_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint32_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_uint16' mangled-name='nvlist_prtctlop_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint16'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-205' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-206' size-in-bits='64' id='type-id-207'/>
- <function-decl name='nvlist_prtctlop_int64_array' mangled-name='nvlist_prtctlop_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int64_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_int16' mangled-name='nvlist_prtctlop_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int16'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-207' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-208' size-in-bits='64' id='type-id-209'/>
- <function-decl name='nvlist_prtctlop_uint64_array' mangled-name='nvlist_prtctlop_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint64_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_uint8' mangled-name='nvlist_prtctlop_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint8'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-209' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-210' size-in-bits='64' id='type-id-211'/>
- <function-decl name='nvlist_prtctlop_string_array' mangled-name='nvlist_prtctlop_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_string_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_int8' mangled-name='nvlist_prtctlop_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int8'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-211' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
<pointer-type-def type-id='type-id-212' size-in-bits='64' id='type-id-213'/>
- <function-decl name='nvlist_prtctlop_nvlist_array' mangled-name='nvlist_prtctlop_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_nvlist_array'>
- <parameter type-id='type-id-158' name='pctl'/>
+ <function-decl name='nvlist_prtctlop_byte' mangled-name='nvlist_prtctlop_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_byte'>
+ <parameter type-id='type-id-163' name='pctl'/>
<parameter type-id='type-id-213' name='func'/>
- <parameter type-id='type-id-19' name='private'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_prtctl_alloc' mangled-name='nvlist_prtctl_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_alloc'>
- <return type-id='type-id-158'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_free' mangled-name='nvlist_prtctl_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_free'>
- <parameter type-id='type-id-158' name='pctl'/>
- <return type-id='type-id-1'/>
+ <pointer-type-def type-id='type-id-214' size-in-bits='64' id='type-id-215'/>
+ <function-decl name='nvlist_prtctlop_boolean_value' mangled-name='nvlist_prtctlop_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean_value'>
+ <parameter type-id='type-id-163' name='pctl'/>
+ <parameter type-id='type-id-215' name='func'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_print' mangled-name='nvlist_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_print'>
- <parameter type-id='type-id-3' name='fp'/>
- <parameter type-id='type-id-73' name='nvl'/>
- <return type-id='type-id-1'/>
+ <pointer-type-def type-id='type-id-216' size-in-bits='64' id='type-id-217'/>
+ <function-decl name='nvlist_prtctlop_boolean' mangled-name='nvlist_prtctlop_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean'>
+ <parameter type-id='type-id-163' name='pctl'/>
+ <parameter type-id='type-id-217' name='func'/>
+ <parameter type-id='type-id-46' name='private'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <class-decl name='nvpair' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-214'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvp_size' type-id='type-id-65' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='nvp_name_sz' type-id='type-id-94' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='48'>
- <var-decl name='nvp_reserve' type-id='type-id-94' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvp_value_elem' type-id='type-id-65' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='nvp_type' type-id='type-id-215' visibility='default'/>
- </data-member>
- </class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-216'>
- <underlying-type type-id='type-id-32'/>
- <enumerator name='DATA_TYPE_DONTCARE' value='-1'/>
- <enumerator name='DATA_TYPE_UNKNOWN' value='0'/>
- <enumerator name='DATA_TYPE_BOOLEAN' value='1'/>
- <enumerator name='DATA_TYPE_BYTE' value='2'/>
- <enumerator name='DATA_TYPE_INT16' value='3'/>
- <enumerator name='DATA_TYPE_UINT16' value='4'/>
- <enumerator name='DATA_TYPE_INT32' value='5'/>
- <enumerator name='DATA_TYPE_UINT32' value='6'/>
- <enumerator name='DATA_TYPE_INT64' value='7'/>
- <enumerator name='DATA_TYPE_UINT64' value='8'/>
- <enumerator name='DATA_TYPE_STRING' value='9'/>
- <enumerator name='DATA_TYPE_BYTE_ARRAY' value='10'/>
- <enumerator name='DATA_TYPE_INT16_ARRAY' value='11'/>
- <enumerator name='DATA_TYPE_UINT16_ARRAY' value='12'/>
- <enumerator name='DATA_TYPE_INT32_ARRAY' value='13'/>
- <enumerator name='DATA_TYPE_UINT32_ARRAY' value='14'/>
- <enumerator name='DATA_TYPE_INT64_ARRAY' value='15'/>
- <enumerator name='DATA_TYPE_UINT64_ARRAY' value='16'/>
- <enumerator name='DATA_TYPE_STRING_ARRAY' value='17'/>
- <enumerator name='DATA_TYPE_HRTIME' value='18'/>
- <enumerator name='DATA_TYPE_NVLIST' value='19'/>
- <enumerator name='DATA_TYPE_NVLIST_ARRAY' value='20'/>
- <enumerator name='DATA_TYPE_BOOLEAN_VALUE' value='21'/>
- <enumerator name='DATA_TYPE_INT8' value='22'/>
- <enumerator name='DATA_TYPE_UINT8' value='23'/>
- <enumerator name='DATA_TYPE_BOOLEAN_ARRAY' value='24'/>
- <enumerator name='DATA_TYPE_INT8_ARRAY' value='25'/>
- <enumerator name='DATA_TYPE_UINT8_ARRAY' value='26'/>
- <enumerator name='DATA_TYPE_DOUBLE' value='27'/>
+ <enum-decl name='nvlist_prtctl_fmt' id='type-id-218'>
+ <underlying-type type-id='type-id-9'/>
+ <enumerator name='NVLIST_FMT_MEMBER_NAME' value='0'/>
+ <enumerator name='NVLIST_FMT_MEMBER_POSTAMBLE' value='1'/>
+ <enumerator name='NVLIST_FMT_BTWN_ARRAY' value='2'/>
</enum-decl>
- <typedef-decl name='data_type_t' type-id='type-id-216' id='type-id-215'/>
- <pointer-type-def type-id='type-id-214' size-in-bits='64' id='type-id-217'/>
- <pointer-type-def type-id='type-id-64' size-in-bits='64' id='type-id-218'/>
- <function-decl name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-217'/>
- </function-decl>
- <function-decl name='nvpair_type' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-216'/>
- </function-decl>
- <function-decl name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <pointer-type-def type-id='type-id-80' size-in-bits='64' id='type-id-219'/>
- <function-decl name='nvpair_value_byte' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-219'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_prtctl_dofmt' mangled-name='nvlist_prtctl_dofmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_dofmt'>
+ <parameter type-id='type-id-163' name='pctl'/>
+ <parameter type-id='type-id-218' name='which'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-92' size-in-bits='64' id='type-id-220'/>
- <function-decl name='nvpair_value_int16' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-220'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_prtctl_setfmt' mangled-name='nvlist_prtctl_setfmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setfmt'>
+ <parameter type-id='type-id-163' name='pctl'/>
+ <parameter type-id='type-id-218' name='which'/>
+ <parameter type-id='type-id-36' name='fmt'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-13' size-in-bits='64' id='type-id-221'/>
- <function-decl name='nvpair_value_uint16' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-221'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_prtctl_doindent' mangled-name='nvlist_prtctl_doindent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_doindent'>
+ <parameter type-id='type-id-163' name='pctl'/>
+ <parameter type-id='type-id-1' name='onemore'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-5' size-in-bits='64' id='type-id-222'/>
- <function-decl name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-222'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_prtctl_setindent' mangled-name='nvlist_prtctl_setindent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setindent'>
+ <parameter type-id='type-id-163' name='pctl'/>
+ <parameter type-id='type-id-35' name='mode'/>
+ <parameter type-id='type-id-1' name='start'/>
+ <parameter type-id='type-id-1' name='inc'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-69' size-in-bits='64' id='type-id-223'/>
- <function-decl name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_prtctl_getdest' mangled-name='nvlist_prtctl_getdest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_getdest'>
+ <parameter type-id='type-id-163' name='pctl'/>
+ <return type-id='type-id-34'/>
</function-decl>
- <pointer-type-def type-id='type-id-24' size-in-bits='64' id='type-id-224'/>
- <function-decl name='nvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-224'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_prtctl_setdest' mangled-name='nvlist_prtctl_setdest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setdest'>
+ <parameter type-id='type-id-163' name='pctl'/>
+ <parameter type-id='type-id-34' name='fp'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-29' size-in-bits='64' id='type-id-225'/>
- <function-decl name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-225'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_string' mangled-name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-153'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_string_array' mangled-name='nvpair_value_string_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-219' size-in-bits='64' id='type-id-226'/>
- <function-decl name='nvpair_value_byte_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-226'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='regexec' mangled-name='regexec' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-220' size-in-bits='64' id='type-id-227'/>
- <function-decl name='nvpair_value_int16_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-227'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='strcmp' mangled-name='strcmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-221' size-in-bits='64' id='type-id-228'/>
- <function-decl name='nvpair_value_uint16_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-228'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='sscanf' mangled-name='sscanf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-222' size-in-bits='64' id='type-id-229'/>
- <function-decl name='nvpair_value_int32_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-229'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_byte' mangled-name='nvpair_value_byte' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-223' size-in-bits='64' id='type-id-230'/>
- <function-decl name='nvpair_value_uint32_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-230'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_byte_array' mangled-name='nvpair_value_byte_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-224' size-in-bits='64' id='type-id-231'/>
- <function-decl name='nvpair_value_int64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-231'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int8' mangled-name='nvpair_value_int8' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-225' size-in-bits='64' id='type-id-232'/>
- <function-decl name='nvpair_value_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-232'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int8_array' mangled-name='nvpair_value_int8_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-153' size-in-bits='64' id='type-id-233'/>
- <function-decl name='nvpair_value_string_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-233'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_uint8' mangled-name='nvpair_value_uint8' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-116' size-in-bits='64' id='type-id-234'/>
- <function-decl name='nvpair_value_hrtime' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-234'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_uint8_array' mangled-name='nvpair_value_uint8_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-218' size-in-bits='64' id='type-id-235'/>
- <function-decl name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-235'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int16' mangled-name='nvpair_value_int16' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-235' size-in-bits='64' id='type-id-236'/>
- <function-decl name='nvpair_value_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-236'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int16_array' mangled-name='nvpair_value_int16_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-76' size-in-bits='64' id='type-id-237'/>
- <function-decl name='nvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-237'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_uint16' mangled-name='nvpair_value_uint16' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-14' size-in-bits='64' id='type-id-238'/>
- <function-decl name='nvpair_value_int8' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-238'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_uint16_array' mangled-name='nvpair_value_uint16_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_uint8' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-219'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int32' mangled-name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-237' size-in-bits='64' id='type-id-239'/>
- <function-decl name='nvpair_value_boolean_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-239'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int32_array' mangled-name='nvpair_value_int32_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-238' size-in-bits='64' id='type-id-240'/>
- <function-decl name='nvpair_value_int8_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-240'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_uint32' mangled-name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-226'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_uint32_array' mangled-name='nvpair_value_uint32_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-111' size-in-bits='64' id='type-id-241'/>
- <function-decl name='nvpair_value_double' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <parameter type-id='type-id-241'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int64' mangled-name='nvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_prt' mangled-name='nvlist_prt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prt'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-158' name='pctl'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvpair_value_int64_array' mangled-name='nvpair_value_int64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='dump_nvlist' mangled-name='dump_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dump_nvlist'>
- <parameter type-id='type-id-73' name='list'/>
- <parameter type-id='type-id-5' name='indent'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvpair_value_uint64' mangled-name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-9'/>
+ <function-decl name='nvpair_value_uint64_array' mangled-name='nvpair_value_uint64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <typedef-decl name='nvpair_t' type-id='type-id-214' id='type-id-242'/>
- <pointer-type-def type-id='type-id-242' size-in-bits='64' id='type-id-243'/>
- <class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-244'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='buffer' type-id='type-id-245' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='allocated' type-id='type-id-246' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='used' type-id='type-id-246' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='syntax' type-id='type-id-247' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='fastmap' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='translate' type-id='type-id-219' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='re_nsub' type-id='type-id-20' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='31'>
- <var-decl name='can_be_null' type-id='type-id-69' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='29'>
- <var-decl name='regs_allocated' type-id='type-id-69' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='28'>
- <var-decl name='fastmap_accurate' type-id='type-id-69' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='27'>
- <var-decl name='no_sub' type-id='type-id-69' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='26'>
- <var-decl name='not_bol' type-id='type-id-69' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='25'>
- <var-decl name='not_eol' type-id='type-id-69' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='24'>
- <var-decl name='newline_anchor' type-id='type-id-69' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='re_dfa_t' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-248'/>
- <pointer-type-def type-id='type-id-248' size-in-bits='64' id='type-id-245'/>
- <typedef-decl name='__re_long_size_t' type-id='type-id-29' id='type-id-246'/>
- <typedef-decl name='reg_syntax_t' type-id='type-id-29' id='type-id-247'/>
- <typedef-decl name='regex_t' type-id='type-id-244' id='type-id-249'/>
- <pointer-type-def type-id='type-id-249' size-in-bits='64' id='type-id-250'/>
- <function-decl name='nvpair_value_match_regex' mangled-name='nvpair_value_match_regex' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_match_regex'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-5' name='ai'/>
- <parameter type-id='type-id-9' name='value'/>
- <parameter type-id='type-id-250' name='value_regex'/>
- <parameter type-id='type-id-153' name='ep'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_boolean_value' mangled-name='nvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_type_is_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_boolean_array' mangled-name='nvpair_value_boolean_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <qualified-type-def type-id='type-id-244' const='yes' id='type-id-251'/>
- <pointer-type-def type-id='type-id-251' size-in-bits='64' id='type-id-252'/>
- <class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-253'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='rm_so' type-id='type-id-254' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='rm_eo' type-id='type-id-254' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='regoff_t' type-id='type-id-5' id='type-id-254'/>
- <pointer-type-def type-id='type-id-253' size-in-bits='64' id='type-id-255'/>
- <function-decl name='regexec' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-252'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-29'/>
- <parameter type-id='type-id-255'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_type_is_array' mangled-name='nvpair_type_is_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_match' mangled-name='nvpair_value_match' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_match'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-5' name='ai'/>
- <parameter type-id='type-id-9' name='value'/>
- <parameter type-id='type-id-153' name='ep'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_type' mangled-name='nvpair_type' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-124'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-122'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-114'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-9'/>
- <return type-id='type-id-5'/>
+ <function-decl name='strspn' mangled-name='strspn' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='strcspn' mangled-name='strcspn' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='__stack_chk_fail' mangled-name='__stack_chk_fail' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='__printf_chk' mangled-name='__printf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='nvpair_name' mangled-name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='nvlist_next_nvpair' mangled-name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='nvpair_value_nvlist' mangled-name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='nvpair_value_nvlist_array' mangled-name='nvpair_value_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='dcgettext' mangled-name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='nvpair_value_double' mangled-name='nvpair_value_double' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='nvpair_value_hrtime' mangled-name='nvpair_value_hrtime' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='__builtin_fputs' mangled-name='fputs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='__fprintf_chk' mangled-name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='free' mangled-name='free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='malloc' mangled-name='malloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='calloc' mangled-name='calloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='__builtin_strchr' mangled-name='strchr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='__builtin_fputc' mangled-name='fputc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='type-id-130'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-128'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-154'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-153'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-120'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-14'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-112'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-111'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-159'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-15'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-74'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-118'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-117'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-136'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-135'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-84'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-1'/>
+ <return type-id='type-id-1'/>
</function-type>
<function-type size-in-bits='64' id='type-id-142'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
<parameter type-id='type-id-141'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
<function-type size-in-bits='64' id='type-id-148'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
<parameter type-id='type-id-147'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-130'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <function-type size-in-bits='64' id='type-id-154'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-153'/>
<parameter type-id='type-id-129'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-120'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-73'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-136'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-135'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-157'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-156'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-126'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-32'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-78'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-77'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-162'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-161'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-118'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-117'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-88'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-87'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-95'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-94'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-124'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-123'/>
+ <return type-id='type-id-1'/>
</function-type>
<function-type size-in-bits='64' id='type-id-101'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-65'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-4'/>
+ <return type-id='type-id-1'/>
</function-type>
<function-type size-in-bits='64' id='type-id-107'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-106'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-3'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-86'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-113'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-82'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-81'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-95'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-94'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-99'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-98'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-91'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-90'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-103'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-66'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-105'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-104'/>
+ <return type-id='type-id-1'/>
</function-type>
<function-type size-in-bits='64' id='type-id-109'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-67'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-27'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-90'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-89'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-115'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-28'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-127'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-99'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-98'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-139'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-138'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-133'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-132'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
<function-type size-in-bits='64' id='type-id-145'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
<parameter type-id='type-id-144'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
<function-type size-in-bits='64' id='type-id-151'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
<parameter type-id='type-id-150'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-133'>
- <parameter type-id='type-id-63'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-132'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-157'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-156'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-190'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-122'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-139'>
+ <parameter type-id='type-id-83'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-138'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-184'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-9'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-186'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-128'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-210'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-153'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-192'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-14'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-182'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-111'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-166'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-15'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-160'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-194'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-117'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-198'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-135'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-216'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-1'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-202'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <function-type size-in-bits='64' id='type-id-178'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
<parameter type-id='type-id-141'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-206'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <function-type size-in-bits='64' id='type-id-174'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
<parameter type-id='type-id-147'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-194'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <function-type size-in-bits='64' id='type-id-170'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-153'/>
<parameter type-id='type-id-129'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <return type-id='type-id-1'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-182'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-135'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
<function-type size-in-bits='64' id='type-id-188'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-73'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-32'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-212'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-156'/>
+ <function-type size-in-bits='64' id='type-id-164'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-161'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-214'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-87'/>
+ <return type-id='type-id-1'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-190'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
<parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-162'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-77'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-206'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-4'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-186'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-117'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-202'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-3'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-170'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <function-type size-in-bits='64' id='type-id-198'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-1'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-210'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
<parameter type-id='type-id-94'/>
- <return type-id='type-id-5'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-174'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-65'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-212'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-90'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-178'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-106'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-204'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-104'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-166'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-200'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-27'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-164'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-81'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-196'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-28'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-172'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <function-type size-in-bits='64' id='type-id-208'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
<parameter type-id='type-id-98'/>
- <return type-id='type-id-5'/>
+ <return type-id='type-id-1'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-184'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-132'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
<function-type size-in-bits='64' id='type-id-176'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-66'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-144'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-180'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-67'/>
- <return type-id='type-id-5'/>
+ <function-type size-in-bits='64' id='type-id-172'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-150'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
<function-type size-in-bits='64' id='type-id-168'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-89'/>
- <return type-id='type-id-5'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-192'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-156'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-200'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
+ <function-type size-in-bits='64' id='type-id-180'>
+ <parameter type-id='type-id-163'/>
+ <parameter type-id='type-id-46'/>
+ <parameter type-id='type-id-32'/>
+ <parameter type-id='type-id-36'/>
<parameter type-id='type-id-138'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-204'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-144'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-208'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-150'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-196'>
- <parameter type-id='type-id-158'/>
- <parameter type-id='type-id-19'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-132'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-129'/>
+ <return type-id='type-id-1'/>
</function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='libnvpair_json.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libnvpair' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='libnvpair_json.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
<function-decl name='nvlist_print_json' mangled-name='nvlist_print_json' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_print_json'>
- <parameter type-id='type-id-3' name='fp'/>
- <parameter type-id='type-id-73' name='nvl'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-34' name='fp'/>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvpair_value_byte' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-80'/>
+ <function-decl name='fnvpair_value_string' mangled-name='fnvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_int16' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-92'/>
+ <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_uint16' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-13'/>
+ <function-decl name='fnvpair_value_byte' mangled-name='fnvpair_value_byte' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_int16' mangled-name='fnvpair_value_int16' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-69'/>
+ <function-decl name='fnvpair_value_uint16' mangled-name='fnvpair_value_uint16' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-24'/>
+ <function-decl name='fnvpair_value_int32' mangled-name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-29'/>
+ <function-decl name='fnvpair_value_uint32' mangled-name='fnvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-9'/>
+ <function-decl name='fnvpair_value_int64' mangled-name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-5'/>
- <parameter type-id='type-id-6'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvpair_value_uint64' mangled-name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-218'/>
+ <function-decl name='fnvpair_value_nvlist' mangled-name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-76'/>
+ <function-decl name='fnvpair_value_boolean_value' mangled-name='fnvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_int8' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-14'/>
+ <function-decl name='fnvpair_value_int8' mangled-name='fnvpair_value_int8' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_uint8' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-80'/>
+ <function-decl name='fnvpair_value_uint8' mangled-name='fnvpair_value_uint8' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-256'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__count' type-id='type-id-5' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='__value' type-id='type-id-257' visibility='default'/>
- </data-member>
- </class-decl>
- <union-decl name='__anonymous_union__' size-in-bits='32' is-anonymous='yes' visibility='default' id='type-id-257'>
- <data-member access='private'>
- <var-decl name='__wch' type-id='type-id-69' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__wchb' type-id='type-id-258' visibility='default'/>
- </data-member>
- </union-decl>
-
- <array-type-def dimensions='1' type-id='type-id-22' size-in-bits='32' id='type-id-258'>
- <subrange length='4' type-id='type-id-25' id='type-id-259'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-256' size-in-bits='64' id='type-id-260'/>
- <function-decl name='mbrtowc' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-222'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-29'/>
- <parameter type-id='type-id-260'/>
- <return type-id='type-id-29'/>
+ <function-decl name='__ctype_get_mb_cur_max' mangled-name='__ctype_get_mb_cur_max' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='mbrtowc' mangled-name='mbrtowc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='nvpair_alloc_system.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libnvpair' language='LANG_C99'>
- <class-decl name='nv_alloc' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-261'>
+ <abi-instr version='1.0' address-size='64' path='nvpair_alloc_system.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
+ <class-decl name='nv_alloc' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-219'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nva_ops' type-id='type-id-262' visibility='default'/>
+ <var-decl name='nva_ops' type-id='type-id-220' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nva_arg' type-id='type-id-19' visibility='default'/>
+ <var-decl name='nva_arg' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='nv_alloc_ops' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-263'>
+ <class-decl name='nv_alloc_ops' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-221'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nv_ao_init' type-id='type-id-264' visibility='default'/>
+ <var-decl name='nv_ao_init' type-id='type-id-222' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nv_ao_fini' type-id='type-id-265' visibility='default'/>
+ <var-decl name='nv_ao_fini' type-id='type-id-223' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='nv_ao_alloc' type-id='type-id-266' visibility='default'/>
+ <var-decl name='nv_ao_alloc' type-id='type-id-224' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='nv_ao_free' type-id='type-id-267' visibility='default'/>
+ <var-decl name='nv_ao_free' type-id='type-id-225' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='nv_ao_reset' type-id='type-id-265' visibility='default'/>
+ <var-decl name='nv_ao_reset' type-id='type-id-223' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='nv_alloc_t' type-id='type-id-261' id='type-id-268'/>
- <pointer-type-def type-id='type-id-268' size-in-bits='64' id='type-id-269'/>
- <class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-270'>
+ <typedef-decl name='nv_alloc_t' type-id='type-id-219' id='type-id-226'/>
+ <pointer-type-def type-id='type-id-226' size-in-bits='64' id='type-id-227'/>
+ <class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-228'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='gp_offset' type-id='type-id-69' visibility='default'/>
+ <var-decl name='gp_offset' type-id='type-id-21' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='fp_offset' type-id='type-id-69' visibility='default'/>
+ <var-decl name='fp_offset' type-id='type-id-21' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='overflow_arg_area' type-id='type-id-19' visibility='default'/>
+ <var-decl name='overflow_arg_area' type-id='type-id-46' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='reg_save_area' type-id='type-id-19' visibility='default'/>
+ <var-decl name='reg_save_area' type-id='type-id-46' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-270' size-in-bits='64' id='type-id-271'/>
- <pointer-type-def type-id='type-id-272' size-in-bits='64' id='type-id-264'/>
- <pointer-type-def type-id='type-id-273' size-in-bits='64' id='type-id-265'/>
- <pointer-type-def type-id='type-id-274' size-in-bits='64' id='type-id-266'/>
- <pointer-type-def type-id='type-id-275' size-in-bits='64' id='type-id-267'/>
- <typedef-decl name='nv_alloc_ops_t' type-id='type-id-263' id='type-id-276'/>
- <qualified-type-def type-id='type-id-276' const='yes' id='type-id-277'/>
- <pointer-type-def type-id='type-id-277' size-in-bits='64' id='type-id-262'/>
- <var-decl name='nv_alloc_nosleep' type-id='type-id-269' mangled-name='nv_alloc_nosleep' visibility='default' elf-symbol-id='nv_alloc_nosleep'/>
- <function-type size-in-bits='64' id='type-id-272'>
- <parameter type-id='type-id-269'/>
- <parameter type-id='type-id-271'/>
- <return type-id='type-id-5'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-273'>
- <parameter type-id='type-id-269'/>
+ <pointer-type-def type-id='type-id-228' size-in-bits='64' id='type-id-229'/>
+ <pointer-type-def type-id='type-id-230' size-in-bits='64' id='type-id-222'/>
+ <pointer-type-def type-id='type-id-231' size-in-bits='64' id='type-id-223'/>
+ <pointer-type-def type-id='type-id-232' size-in-bits='64' id='type-id-224'/>
+ <pointer-type-def type-id='type-id-233' size-in-bits='64' id='type-id-225'/>
+ <typedef-decl name='nv_alloc_ops_t' type-id='type-id-221' id='type-id-234'/>
+ <qualified-type-def type-id='type-id-234' const='yes' id='type-id-235'/>
+ <pointer-type-def type-id='type-id-235' size-in-bits='64' id='type-id-220'/>
+ <var-decl name='nv_alloc_nosleep' type-id='type-id-227' mangled-name='nv_alloc_nosleep' visibility='default' elf-symbol-id='nv_alloc_nosleep'/>
+ <function-type size-in-bits='64' id='type-id-230'>
+ <parameter type-id='type-id-227'/>
+ <parameter type-id='type-id-229'/>
<return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-275'>
- <parameter type-id='type-id-269'/>
- <parameter type-id='type-id-19'/>
+ <function-type size-in-bits='64' id='type-id-231'>
+ <parameter type-id='type-id-227'/>
+ <return type-id='type-id-25'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-233'>
+ <parameter type-id='type-id-227'/>
+ <parameter type-id='type-id-46'/>
<parameter type-id='type-id-20'/>
- <return type-id='type-id-1'/>
+ <return type-id='type-id-25'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-274'>
- <parameter type-id='type-id-269'/>
+ <function-type size-in-bits='64' id='type-id-232'>
+ <parameter type-id='type-id-227'/>
<parameter type-id='type-id-20'/>
- <return type-id='type-id-19'/>
+ <return type-id='type-id-46'/>
</function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/nvpair/nvpair_alloc_fixed.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libnvpair' language='LANG_C99'>
- <var-decl name='nv_fixed_ops' type-id='type-id-262' mangled-name='nv_fixed_ops' visibility='default' elf-symbol-id='nv_fixed_ops'/>
+ <abi-instr version='1.0' address-size='64' path='../../module/nvpair/nvpair_alloc_fixed.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
+ <var-decl name='nv_fixed_ops' type-id='type-id-220' mangled-name='nv_fixed_ops' visibility='default' elf-symbol-id='nv_fixed_ops'/>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/nvpair/nvpair.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libnvpair' language='LANG_C99'>
- <function-decl name='nv_alloc_init' mangled-name='nv_alloc_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_init'>
- <parameter type-id='type-id-269' name='nva'/>
- <parameter type-id='type-id-262' name='nvo'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-5'/>
+ <abi-instr version='1.0' address-size='64' path='../../module/nvpair/nvpair.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
+ <function-decl name='nvlist_xunpack' mangled-name='nvlist_xunpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xunpack'>
+ <parameter type-id='type-id-14' name='buf'/>
+ <parameter type-id='type-id-20' name='buflen'/>
+ <parameter type-id='type-id-161' name='nvlp'/>
+ <parameter type-id='type-id-227' name='nva'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nv_alloc_reset' mangled-name='nv_alloc_reset' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_reset'>
- <parameter type-id='type-id-269' name='nva'/>
+ <function-decl name='nvlist_unpack' mangled-name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_unpack'>
+ <parameter type-id='type-id-14' name='buf'/>
+ <parameter type-id='type-id-20' name='buflen'/>
+ <parameter type-id='type-id-161' name='nvlp'/>
+ <parameter type-id='type-id-1' name='kmflag'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nv_alloc_fini' mangled-name='nv_alloc_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_fini'>
- <parameter type-id='type-id-269' name='nva'/>
+ <pointer-type-def type-id='type-id-20' size-in-bits='64' id='type-id-236'/>
+ <function-decl name='nvlist_pack' mangled-name='nvlist_pack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_pack'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-15' name='bufp'/>
+ <parameter type-id='type-id-236' name='buflen'/>
+ <parameter type-id='type-id-1' name='encoding'/>
+ <parameter type-id='type-id-1' name='kmflag'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_nv_alloc' mangled-name='nvlist_lookup_nv_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nv_alloc'>
- <parameter type-id='type-id-73' name='nvl'/>
- <return type-id='type-id-269'/>
+ <function-decl name='nvlist_merge' mangled-name='nvlist_merge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_merge'>
+ <parameter type-id='type-id-32' name='dst'/>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-1' name='flag'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_nvflag' mangled-name='nvlist_nvflag' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_nvflag'>
- <parameter type-id='type-id-73' name='nvl'/>
- <return type-id='type-id-123'/>
+ <function-decl name='nvlist_add_nvpair' mangled-name='nvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvpair'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_alloc'>
- <parameter type-id='type-id-156' name='nvlp'/>
- <parameter type-id='type-id-123' name='nvflag'/>
- <parameter type-id='type-id-5' name='kmflag'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-123' size-in-bits='64' id='type-id-237'/>
+ <function-decl name='nvpair_value_hrtime' mangled-name='nvpair_value_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_hrtime'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-237' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_xalloc' mangled-name='nvlist_xalloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xalloc'>
- <parameter type-id='type-id-156' name='nvlp'/>
- <parameter type-id='type-id-123' name='nvflag'/>
- <parameter type-id='type-id-269' name='nva'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-161' size-in-bits='64' id='type-id-238'/>
+ <pointer-type-def type-id='type-id-129' size-in-bits='64' id='type-id-239'/>
+ <function-decl name='nvpair_value_nvlist_array' mangled-name='nvpair_value_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_nvlist_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-238' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_free'>
- <parameter type-id='type-id-73' name='nvl'/>
+ <pointer-type-def type-id='type-id-15' size-in-bits='64' id='type-id-240'/>
+ <function-decl name='nvpair_value_string_array' mangled-name='nvpair_value_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_string_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-240' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_dup'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-156' name='nvlp'/>
- <parameter type-id='type-id-5' name='kmflag'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-156' size-in-bits='64' id='type-id-241'/>
+ <function-decl name='nvpair_value_uint64_array' mangled-name='nvpair_value_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint64_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-241' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_xdup' mangled-name='nvlist_xdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xdup'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-156' name='nvlp'/>
- <parameter type-id='type-id-269' name='nva'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-153' size-in-bits='64' id='type-id-242'/>
+ <function-decl name='nvpair_value_int64_array' mangled-name='nvpair_value_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int64_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-242' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove_all'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-150' size-in-bits='64' id='type-id-243'/>
+ <function-decl name='nvpair_value_uint32_array' mangled-name='nvpair_value_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint32_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-243' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_remove_nvpair' mangled-name='nvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove_nvpair'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-147' size-in-bits='64' id='type-id-244'/>
+ <function-decl name='nvpair_value_int32_array' mangled-name='nvpair_value_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int32_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-244' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_remove' mangled-name='nvlist_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-215' name='type'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-144' size-in-bits='64' id='type-id-245'/>
+ <function-decl name='nvpair_value_uint16_array' mangled-name='nvpair_value_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint16_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-245' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_boolean' mangled-name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-141' size-in-bits='64' id='type-id-246'/>
+ <function-decl name='nvpair_value_int16_array' mangled-name='nvpair_value_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int16_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-246' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_boolean_value' mangled-name='nvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean_value'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-77' name='val'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-138' size-in-bits='64' id='type-id-247'/>
+ <function-decl name='nvpair_value_uint8_array' mangled-name='nvpair_value_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint8_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-247' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_byte' mangled-name='nvlist_add_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_byte'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-81' name='val'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-135' size-in-bits='64' id='type-id-248'/>
+ <function-decl name='nvpair_value_int8_array' mangled-name='nvpair_value_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int8_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-248' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_int8' mangled-name='nvlist_add_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int8'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-85' name='val'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-132' size-in-bits='64' id='type-id-249'/>
+ <function-decl name='nvpair_value_byte_array' mangled-name='nvpair_value_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_byte_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-249' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_uint8' mangled-name='nvlist_add_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint8'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-89' name='val'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-128' size-in-bits='64' id='type-id-250'/>
+ <function-decl name='nvpair_value_boolean_array' mangled-name='nvpair_value_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_boolean_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-250' name='val'/>
+ <parameter type-id='type-id-239' name='nelem'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_int16' mangled-name='nvlist_add_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int16'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-94' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_nvlist' mangled-name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_nvlist'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-161' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_uint16' mangled-name='nvlist_add_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint16'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-98' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_string' mangled-name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_string'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-15' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_int32' mangled-name='nvlist_add_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int32'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-65' name='val'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-117' size-in-bits='64' id='type-id-251'/>
+ <function-decl name='nvpair_value_double' mangled-name='nvpair_value_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_double'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-251' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_uint32' mangled-name='nvlist_add_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint32'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-66' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_uint64' mangled-name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint64'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-156' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_int64' mangled-name='nvlist_add_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int64'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-106' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int64' mangled-name='nvpair_value_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int64'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-153' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint64'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-67' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_uint32' mangled-name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint32'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-150' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_double' mangled-name='nvlist_add_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_double'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-111' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int32' mangled-name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int32'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-147' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_string'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-6' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_uint16' mangled-name='nvpair_value_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint16'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-144' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_boolean_array' mangled-name='nvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-122' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int16' mangled-name='nvpair_value_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int16'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-141' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_byte_array' mangled-name='nvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_byte_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-126' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_uint8' mangled-name='nvpair_value_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint8'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-138' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_int8_array' mangled-name='nvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int8_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-129' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_int8' mangled-name='nvpair_value_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int8'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-135' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_uint8_array' mangled-name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint8_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-132' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_byte' mangled-name='nvpair_value_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_byte'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-132' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_int16_array' mangled-name='nvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int16_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-135' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_value_boolean_value' mangled-name='nvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_boolean_value'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <parameter type-id='type-id-128' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_uint16_array' mangled-name='nvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint16_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-138' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_exists' mangled-name='nvlist_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_exists'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-87'/>
</function-decl>
- <function-decl name='nvlist_add_int32_array' mangled-name='nvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int32_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-141' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='type-id-12' size-in-bits='64' id='type-id-252'/>
+ <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-253'/>
+ <function-decl name='nvlist_lookup_nvpair_embedded_index' mangled-name='nvlist_lookup_nvpair_embedded_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvpair_embedded_index'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-252' name='ret'/>
+ <parameter type-id='type-id-253' name='ip'/>
+ <parameter type-id='type-id-15' name='ep'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_uint32_array' mangled-name='nvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint32_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-144' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_nvpair' mangled-name='nvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvpair'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-252' name='ret'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_int64_array' mangled-name='nvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int64_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-147' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_pairs' mangled-name='nvlist_lookup_pairs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_pairs'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-1' name='flag'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint64_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-150' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_hrtime' mangled-name='nvlist_lookup_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_hrtime'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-237' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <qualified-type-def type-id='type-id-9' const='yes' id='type-id-278'/>
- <pointer-type-def type-id='type-id-278' size-in-bits='64' id='type-id-279'/>
- <function-decl name='nvlist_add_string_array' mangled-name='nvlist_add_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_string_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-279' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_nvlist_array' mangled-name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvlist_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-238' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_hrtime' mangled-name='nvlist_add_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_hrtime'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-117' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_string_array' mangled-name='nvlist_lookup_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_string_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-240' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvlist'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-73' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_uint64_array' mangled-name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint64_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-241' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvlist_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-156' name='a'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_int64_array' mangled-name='nvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int64_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-242' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_next_nvpair' mangled-name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_next_nvpair'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-243'/>
+ <function-decl name='nvlist_lookup_uint32_array' mangled-name='nvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint32_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-243' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_prev_nvpair' mangled-name='nvlist_prev_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prev_nvpair'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-243'/>
+ <function-decl name='nvlist_lookup_int32_array' mangled-name='nvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int32_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-244' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_empty' mangled-name='nvlist_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_empty'>
- <parameter type-id='type-id-73' name='nvl'/>
- <return type-id='type-id-77'/>
+ <function-decl name='nvlist_lookup_uint16_array' mangled-name='nvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint16_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-245' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_name' mangled-name='nvpair_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_name'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-9'/>
+ <function-decl name='nvlist_lookup_int16_array' mangled-name='nvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int16_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-246' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_type' mangled-name='nvpair_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_type'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-215'/>
+ <function-decl name='nvlist_lookup_uint8_array' mangled-name='nvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint8_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-247' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_type_is_array' mangled-name='nvpair_type_is_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_type_is_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_int8_array' mangled-name='nvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int8_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-248' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_boolean' mangled-name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_byte_array' mangled-name='nvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_byte_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-249' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_boolean_value' mangled-name='nvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean_value'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-122' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_boolean_array' mangled-name='nvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-250' name='a'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_byte' mangled-name='nvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_byte'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-126' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvlist'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-161' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_int8' mangled-name='nvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int8'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-129' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_string'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-15' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint8' mangled-name='nvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint8'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-132' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_double' mangled-name='nvlist_lookup_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_double'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-251' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_int16' mangled-name='nvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int16'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-135' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint64'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-156' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint16' mangled-name='nvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint16'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-138' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_int64' mangled-name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int64'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-153' name='val'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint32' mangled-name='nvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint32'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-150' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
<function-decl name='nvlist_lookup_int32' mangled-name='nvlist_lookup_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int32'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-141' name='val'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-147' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint32' mangled-name='nvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint32'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
+ <function-decl name='nvlist_lookup_uint16' mangled-name='nvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint16'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
<parameter type-id='type-id-144' name='val'/>
- <return type-id='type-id-5'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_int64' mangled-name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int64'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-147' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_int16' mangled-name='nvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int16'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-141' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint64'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-150' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_uint8' mangled-name='nvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint8'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-138' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_double' mangled-name='nvlist_lookup_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_double'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-241' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_int8' mangled-name='nvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int8'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-135' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_string'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-153' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_byte' mangled-name='nvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_byte'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-132' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvlist'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-156' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_boolean_value' mangled-name='nvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean_value'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-128' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-122' size-in-bits='64' id='type-id-280'/>
- <pointer-type-def type-id='type-id-123' size-in-bits='64' id='type-id-281'/>
- <function-decl name='nvlist_lookup_boolean_array' mangled-name='nvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-280' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_boolean' mangled-name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-126' size-in-bits='64' id='type-id-282'/>
- <function-decl name='nvlist_lookup_byte_array' mangled-name='nvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_byte_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-282' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_type_is_array' mangled-name='nvpair_type_is_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_type_is_array'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-129' size-in-bits='64' id='type-id-283'/>
- <function-decl name='nvlist_lookup_int8_array' mangled-name='nvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int8_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-283' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
+ <function-decl name='nvpair_type' mangled-name='nvpair_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_type'>
+ <parameter type-id='type-id-12' name='nvp'/>
<return type-id='type-id-5'/>
</function-decl>
- <pointer-type-def type-id='type-id-132' size-in-bits='64' id='type-id-284'/>
- <function-decl name='nvlist_lookup_uint8_array' mangled-name='nvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint8_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-284' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvpair_name' mangled-name='nvpair_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_name'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-14'/>
</function-decl>
- <pointer-type-def type-id='type-id-135' size-in-bits='64' id='type-id-285'/>
- <function-decl name='nvlist_lookup_int16_array' mangled-name='nvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int16_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-285' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_empty' mangled-name='nvlist_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_empty'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <return type-id='type-id-87'/>
</function-decl>
- <pointer-type-def type-id='type-id-138' size-in-bits='64' id='type-id-286'/>
- <function-decl name='nvlist_lookup_uint16_array' mangled-name='nvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint16_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-286' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_prev_nvpair' mangled-name='nvlist_prev_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prev_nvpair'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-12'/>
</function-decl>
- <pointer-type-def type-id='type-id-141' size-in-bits='64' id='type-id-287'/>
- <function-decl name='nvlist_lookup_int32_array' mangled-name='nvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int32_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-287' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_next_nvpair' mangled-name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_next_nvpair'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvlist_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-161' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvlist'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-32' name='val'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_hrtime' mangled-name='nvlist_add_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_hrtime'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-123' name='val'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <qualified-type-def type-id='type-id-14' const='yes' id='type-id-254'/>
+ <pointer-type-def type-id='type-id-254' size-in-bits='64' id='type-id-255'/>
+ <function-decl name='nvlist_add_string_array' mangled-name='nvlist_add_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_string_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-255' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint64_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-156' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int64_array' mangled-name='nvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int64_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-153' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint32_array' mangled-name='nvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint32_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-150' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int32_array' mangled-name='nvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int32_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-147' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint16_array' mangled-name='nvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint16_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-144' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int16_array' mangled-name='nvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int16_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-141' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint8_array' mangled-name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint8_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-138' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int8_array' mangled-name='nvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int8_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-135' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_byte_array' mangled-name='nvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_byte_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-132' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_boolean_array' mangled-name='nvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-128' name='a'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_string'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-36' name='val'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_double' mangled-name='nvlist_add_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_double'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-117' name='val'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint64'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-28' name='val'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int64' mangled-name='nvlist_add_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int64'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-112' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-144' size-in-bits='64' id='type-id-288'/>
- <function-decl name='nvlist_lookup_uint32_array' mangled-name='nvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint32_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-288' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_uint32' mangled-name='nvlist_add_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint32'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-27' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-147' size-in-bits='64' id='type-id-289'/>
- <function-decl name='nvlist_lookup_int64_array' mangled-name='nvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int64_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-289' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_int32' mangled-name='nvlist_add_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int32'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-3' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-150' size-in-bits='64' id='type-id-290'/>
- <function-decl name='nvlist_lookup_uint64_array' mangled-name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint64_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-290' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_uint16' mangled-name='nvlist_add_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint16'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-104' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_string_array' mangled-name='nvlist_lookup_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_string_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-233' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_int16' mangled-name='nvlist_add_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int16'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-4' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-156' size-in-bits='64' id='type-id-291'/>
- <function-decl name='nvlist_lookup_nvlist_array' mangled-name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvlist_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-291' name='a'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_uint8' mangled-name='nvlist_add_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint8'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-98' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-117' size-in-bits='64' id='type-id-292'/>
- <function-decl name='nvlist_lookup_hrtime' mangled-name='nvlist_lookup_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_hrtime'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-292' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_int8' mangled-name='nvlist_add_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int8'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-94' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_pairs' mangled-name='nvlist_lookup_pairs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_pairs'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-5' name='flag'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_byte' mangled-name='nvlist_add_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_byte'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-90' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-243' size-in-bits='64' id='type-id-293'/>
- <function-decl name='nvlist_lookup_nvpair' mangled-name='nvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvpair'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-293' name='ret'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_boolean_value' mangled-name='nvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean_value'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-87' name='val'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_nvpair_embedded_index' mangled-name='nvlist_lookup_nvpair_embedded_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvpair_embedded_index'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-293' name='ret'/>
- <parameter type-id='type-id-222' name='ip'/>
- <parameter type-id='type-id-153' name='ep'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_boolean' mangled-name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_exists' mangled-name='nvlist_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_exists'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-77'/>
+ <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_dup'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-161' name='nvlp'/>
+ <parameter type-id='type-id-1' name='kmflag'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_boolean_value' mangled-name='nvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_boolean_value'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-122' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_free'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_byte' mangled-name='nvpair_value_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_byte'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-126' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_alloc'>
+ <parameter type-id='type-id-161' name='nvlp'/>
+ <parameter type-id='type-id-129' name='nvflag'/>
+ <parameter type-id='type-id-1' name='kmflag'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_int8' mangled-name='nvpair_value_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int8'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-129' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_nvflag' mangled-name='nvlist_nvflag' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_nvflag'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <return type-id='type-id-129'/>
</function-decl>
- <function-decl name='nvpair_value_uint8' mangled-name='nvpair_value_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint8'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-132' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_nv_alloc' mangled-name='nvlist_lookup_nv_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nv_alloc'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <return type-id='type-id-227'/>
</function-decl>
- <function-decl name='nvpair_value_int16' mangled-name='nvpair_value_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int16'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-135' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nv_alloc_fini' mangled-name='nv_alloc_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_fini'>
+ <parameter type-id='type-id-227' name='nva'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_uint16' mangled-name='nvpair_value_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint16'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-138' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nv_alloc_reset' mangled-name='nv_alloc_reset' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_reset'>
+ <parameter type-id='type-id-227' name='nva'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_int32' mangled-name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int32'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-141' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nv_alloc_init' mangled-name='nv_alloc_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_init'>
+ <parameter type-id='type-id-227' name='nva'/>
+ <parameter type-id='type-id-220' name='nvo'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_uint32' mangled-name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint32'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-144' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_xalloc' mangled-name='nvlist_xalloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xalloc'>
+ <parameter type-id='type-id-161' name='nvlp'/>
+ <parameter type-id='type-id-129' name='nvflag'/>
+ <parameter type-id='type-id-227' name='nva'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_int64' mangled-name='nvpair_value_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int64'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-147' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_remove_nvpair' mangled-name='nvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove_nvpair'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_uint64' mangled-name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint64'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-150' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove_all'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_double' mangled-name='nvpair_value_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_double'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-241' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_remove' mangled-name='nvlist_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-5' name='type'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_string' mangled-name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_string'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-153' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_xdup' mangled-name='nvlist_xdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xdup'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-161' name='nvlp'/>
+ <parameter type-id='type-id-227' name='nva'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_nvlist' mangled-name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_nvlist'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-156' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_size' mangled-name='nvlist_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_size'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-236' name='size'/>
+ <parameter type-id='type-id-1' name='encoding'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_boolean_array' mangled-name='nvpair_value_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_boolean_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-280' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_xpack' mangled-name='nvlist_xpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xpack'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-15' name='bufp'/>
+ <parameter type-id='type-id-236' name='buflen'/>
+ <parameter type-id='type-id-1' name='encoding'/>
+ <parameter type-id='type-id-227' name='nva'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_byte_array' mangled-name='nvpair_value_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_byte_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-282' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_int' mangled-name='xdr_int' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_int8_array' mangled-name='nvpair_value_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int8_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-283' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='strlen' mangled-name='strlen' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_uint8_array' mangled-name='nvpair_value_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint8_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-284' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='__builtin_memset' mangled-name='memset' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_int16_array' mangled-name='nvpair_value_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int16_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-285' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_string' mangled-name='xdr_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_uint16_array' mangled-name='nvpair_value_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint16_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-286' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_longlong_t' mangled-name='xdr_longlong_t' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_int32_array' mangled-name='nvpair_value_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int32_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-287' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_array' mangled-name='xdr_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_uint32_array' mangled-name='nvpair_value_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint32_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-288' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_opaque' mangled-name='xdr_opaque' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_int64_array' mangled-name='nvpair_value_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int64_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-289' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_u_longlong_t' mangled-name='xdr_u_longlong_t' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_uint64_array' mangled-name='nvpair_value_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint64_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-290' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_double' mangled-name='xdr_double' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_string_array' mangled-name='nvpair_value_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_string_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-233' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_u_int' mangled-name='xdr_u_int' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_nvlist_array' mangled-name='nvpair_value_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_nvlist_array'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-291' name='val'/>
- <parameter type-id='type-id-281' name='nelem'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_u_short' mangled-name='xdr_u_short' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvpair_value_hrtime' mangled-name='nvpair_value_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_hrtime'>
- <parameter type-id='type-id-243' name='nvp'/>
- <parameter type-id='type-id-292' name='val'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_short' mangled-name='xdr_short' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_nvpair' mangled-name='nvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvpair'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdr_char' mangled-name='xdr_char' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_merge' mangled-name='nvlist_merge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_merge'>
- <parameter type-id='type-id-73' name='dst'/>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-5' name='flag'/>
- <return type-id='type-id-5'/>
+ <function-decl name='__builtin_memmove' mangled-name='memmove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <pointer-type-def type-id='type-id-20' size-in-bits='64' id='type-id-294'/>
- <function-decl name='nvlist_size' mangled-name='nvlist_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_size'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-294' name='size'/>
- <parameter type-id='type-id-5' name='encoding'/>
- <return type-id='type-id-5'/>
+ <function-decl name='xdrmem_create' mangled-name='xdrmem_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <class-decl name='XDR' size-in-bits='384' is-struct='yes' visibility='default' id='type-id-295'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='x_op' type-id='type-id-296' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='x_ops' type-id='type-id-297' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='x_public' type-id='type-id-298' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='x_private' type-id='type-id-298' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='x_base' type-id='type-id-298' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='x_handy' type-id='type-id-299' visibility='default'/>
- </data-member>
- </class-decl>
- <enum-decl name='xdr_op' id='type-id-296'>
- <underlying-type type-id='type-id-32'/>
- <enumerator name='XDR_ENCODE' value='0'/>
- <enumerator name='XDR_DECODE' value='1'/>
- <enumerator name='XDR_FREE' value='2'/>
- </enum-decl>
- <class-decl name='xdr_ops' size-in-bits='640' is-struct='yes' visibility='default' id='type-id-300'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='x_getlong' type-id='type-id-301' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='x_putlong' type-id='type-id-302' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='x_getbytes' type-id='type-id-303' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='x_putbytes' type-id='type-id-304' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='x_getpostn' type-id='type-id-305' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='x_setpostn' type-id='type-id-306' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='x_inline' type-id='type-id-307' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='x_destroy' type-id='type-id-308' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='x_getint32' type-id='type-id-309' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='x_putint32' type-id='type-id-310' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='bool_t' type-id='type-id-5' id='type-id-311'/>
- <typedef-decl name='XDR' type-id='type-id-295' id='type-id-312'/>
- <pointer-type-def type-id='type-id-312' size-in-bits='64' id='type-id-313'/>
- <pointer-type-def type-id='type-id-314' size-in-bits='64' id='type-id-301'/>
- <qualified-type-def type-id='type-id-24' const='yes' id='type-id-315'/>
- <pointer-type-def type-id='type-id-315' size-in-bits='64' id='type-id-316'/>
- <pointer-type-def type-id='type-id-317' size-in-bits='64' id='type-id-302'/>
- <typedef-decl name='__caddr_t' type-id='type-id-9' id='type-id-318'/>
- <typedef-decl name='caddr_t' type-id='type-id-318' id='type-id-298'/>
- <typedef-decl name='__u_int' type-id='type-id-69' id='type-id-319'/>
- <typedef-decl name='u_int' type-id='type-id-319' id='type-id-299'/>
- <pointer-type-def type-id='type-id-320' size-in-bits='64' id='type-id-303'/>
- <pointer-type-def type-id='type-id-321' size-in-bits='64' id='type-id-304'/>
- <qualified-type-def type-id='type-id-312' const='yes' id='type-id-322'/>
- <pointer-type-def type-id='type-id-322' size-in-bits='64' id='type-id-323'/>
- <pointer-type-def type-id='type-id-324' size-in-bits='64' id='type-id-305'/>
- <pointer-type-def type-id='type-id-325' size-in-bits='64' id='type-id-306'/>
- <pointer-type-def type-id='type-id-326' size-in-bits='64' id='type-id-307'/>
- <pointer-type-def type-id='type-id-327' size-in-bits='64' id='type-id-308'/>
- <pointer-type-def type-id='type-id-328' size-in-bits='64' id='type-id-309'/>
- <qualified-type-def type-id='type-id-65' const='yes' id='type-id-329'/>
- <pointer-type-def type-id='type-id-329' size-in-bits='64' id='type-id-330'/>
- <pointer-type-def type-id='type-id-331' size-in-bits='64' id='type-id-310'/>
- <pointer-type-def type-id='type-id-300' size-in-bits='64' id='type-id-297'/>
- <function-decl name='xdrmem_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-69'/>
- <parameter type-id='type-id-296'/>
- <return type-id='type-id-1'/>
+ <function-decl name='strncmp' mangled-name='strncmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_pack' mangled-name='nvlist_pack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_pack'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-153' name='bufp'/>
- <parameter type-id='type-id-294' name='buflen'/>
- <parameter type-id='type-id-5' name='encoding'/>
- <parameter type-id='type-id-5' name='kmflag'/>
- <return type-id='type-id-5'/>
+ <function-decl name='strtol' mangled-name='strtol' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_xpack' mangled-name='nvlist_xpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xpack'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-153' name='bufp'/>
- <parameter type-id='type-id-294' name='buflen'/>
- <parameter type-id='type-id-5' name='encoding'/>
- <parameter type-id='type-id-269' name='nva'/>
- <return type-id='type-id-5'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/nvpair/fnvpair.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
+ <function-decl name='fnvpair_value_nvlist' mangled-name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_nvlist'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-32'/>
</function-decl>
- <function-decl name='nvlist_unpack' mangled-name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_unpack'>
- <parameter type-id='type-id-9' name='buf'/>
- <parameter type-id='type-id-20' name='buflen'/>
- <parameter type-id='type-id-156' name='nvlp'/>
- <parameter type-id='type-id-5' name='kmflag'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_string' mangled-name='fnvpair_value_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_string'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-14'/>
</function-decl>
- <function-decl name='nvlist_xunpack' mangled-name='nvlist_xunpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xunpack'>
- <parameter type-id='type-id-9' name='buf'/>
- <parameter type-id='type-id-20' name='buflen'/>
- <parameter type-id='type-id-156' name='nvlp'/>
- <parameter type-id='type-id-269' name='nva'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_uint64' mangled-name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint64'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-28'/>
</function-decl>
- <function-decl name='xdr_int' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-222'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_uint32' mangled-name='fnvpair_value_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint32'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-27'/>
</function-decl>
- <function-decl name='xdr_u_int' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_uint16' mangled-name='fnvpair_value_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint16'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-104'/>
</function-decl>
- <function-decl name='xdr_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-153'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_uint8' mangled-name='fnvpair_value_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint8'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-98'/>
</function-decl>
- <function-decl name='xdr_char' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-9'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_int64' mangled-name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int64'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-112'/>
</function-decl>
- <function-decl name='xdr_longlong_t' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-224'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_int32' mangled-name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int32'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-3'/>
</function-decl>
- <pointer-type-def type-id='type-id-332' size-in-bits='64' id='type-id-333'/>
- <function-decl name='xdr_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-153'/>
- <parameter type-id='type-id-223'/>
- <parameter type-id='type-id-69'/>
- <parameter type-id='type-id-69'/>
- <parameter type-id='type-id-333'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_int16' mangled-name='fnvpair_value_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int16'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-4'/>
</function-decl>
- <function-decl name='xdr_short' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-220'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_int8' mangled-name='fnvpair_value_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int8'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-94'/>
</function-decl>
- <function-decl name='xdr_u_short' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-221'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_byte' mangled-name='fnvpair_value_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_byte'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-90'/>
</function-decl>
- <function-decl name='xdr_u_longlong_t' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-225'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvpair_value_boolean_value' mangled-name='fnvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_boolean_value'>
+ <parameter type-id='type-id-12' name='nvp'/>
+ <return type-id='type-id-87'/>
</function-decl>
- <function-decl name='xdr_opaque' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_uint64_array' mangled-name='fnvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint64_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-156'/>
</function-decl>
- <function-decl name='xdr_double' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-241'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_int64_array' mangled-name='fnvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int64_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-153'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-332'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-19'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-5'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-326'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-299'/>
- <return type-id='type-id-141'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-321'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-299'/>
- <return type-id='type-id-311'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-331'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-330'/>
- <return type-id='type-id-311'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-317'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-316'/>
- <return type-id='type-id-311'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-328'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-141'/>
- <return type-id='type-id-311'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-314'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-224'/>
- <return type-id='type-id-311'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-320'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-298'/>
- <parameter type-id='type-id-299'/>
- <return type-id='type-id-311'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-325'>
- <parameter type-id='type-id-313'/>
- <parameter type-id='type-id-299'/>
- <return type-id='type-id-311'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-324'>
- <parameter type-id='type-id-323'/>
- <return type-id='type-id-299'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-327'>
- <parameter type-id='type-id-313'/>
- <return type-id='type-id-1'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/nvpair/fnvpair.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libnvpair' language='LANG_C99'>
- <function-decl name='fnvlist_alloc' mangled-name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_alloc'>
- <return type-id='type-id-73'/>
+ <function-decl name='fnvlist_lookup_uint32_array' mangled-name='fnvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint32_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-150'/>
</function-decl>
- <function-decl name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-235'/>
- <parameter type-id='type-id-69'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_int32_array' mangled-name='fnvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int32_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-147'/>
</function-decl>
- <function-decl name='fnvlist_free' mangled-name='fnvlist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_free'>
- <parameter type-id='type-id-73' name='nvl'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_lookup_uint16_array' mangled-name='fnvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint16_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-144'/>
</function-decl>
- <function-decl name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_lookup_int16_array' mangled-name='fnvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int16_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-141'/>
</function-decl>
- <function-decl name='fnvlist_size' mangled-name='fnvlist_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_size'>
- <parameter type-id='type-id-73' name='nvl'/>
- <return type-id='type-id-20'/>
+ <function-decl name='fnvlist_lookup_uint8_array' mangled-name='fnvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint8_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-138'/>
</function-decl>
- <function-decl name='nvlist_size' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-225'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_int8_array' mangled-name='fnvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int8_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-135'/>
</function-decl>
- <function-decl name='fnvlist_pack' mangled-name='fnvlist_pack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_pack'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-294' name='sizep'/>
- <return type-id='type-id-9'/>
+ <function-decl name='fnvlist_lookup_byte_array' mangled-name='fnvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_byte_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-132'/>
</function-decl>
- <function-decl name='nvlist_pack' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-153'/>
- <parameter type-id='type-id-225'/>
- <parameter type-id='type-id-5'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_boolean_array' mangled-name='fnvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-239' name='n'/>
+ <return type-id='type-id-128'/>
</function-decl>
- <function-decl name='fnvlist_pack_free' mangled-name='fnvlist_pack_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_pack_free'>
- <parameter type-id='type-id-9' name='pack'/>
- <parameter type-id='type-id-20' name='size'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_lookup_nvlist' mangled-name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_nvlist'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-32'/>
</function-decl>
- <function-decl name='fnvlist_unpack' mangled-name='fnvlist_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_unpack'>
- <parameter type-id='type-id-9' name='buf'/>
- <parameter type-id='type-id-20' name='buflen'/>
- <return type-id='type-id-73'/>
+ <function-decl name='fnvlist_lookup_string' mangled-name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_string'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-14'/>
</function-decl>
- <function-decl name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-29'/>
- <parameter type-id='type-id-235'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_uint64' mangled-name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint64'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-28'/>
</function-decl>
- <function-decl name='fnvlist_dup' mangled-name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_dup'>
- <parameter type-id='type-id-73' name='nvl'/>
- <return type-id='type-id-73'/>
+ <function-decl name='fnvlist_lookup_uint32' mangled-name='fnvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint32'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-27'/>
</function-decl>
- <function-decl name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-235'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_uint16' mangled-name='fnvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint16'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-104'/>
</function-decl>
- <function-decl name='fnvlist_merge' mangled-name='fnvlist_merge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_merge'>
- <parameter type-id='type-id-73' name='dst'/>
- <parameter type-id='type-id-73' name='src'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_lookup_uint8' mangled-name='fnvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint8'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-98'/>
</function-decl>
- <function-decl name='nvlist_merge' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_int64' mangled-name='fnvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int64'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-112'/>
</function-decl>
- <function-decl name='fnvlist_num_pairs' mangled-name='fnvlist_num_pairs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_num_pairs'>
- <parameter type-id='type-id-73' name='nvl'/>
- <return type-id='type-id-20'/>
+ <function-decl name='fnvlist_lookup_int32' mangled-name='fnvlist_lookup_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int32'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-3'/>
</function-decl>
- <function-decl name='fnvlist_add_boolean' mangled-name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_lookup_int16' mangled-name='fnvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int16'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-4'/>
</function-decl>
- <function-decl name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_int8' mangled-name='fnvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int8'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-94'/>
</function-decl>
- <function-decl name='fnvlist_add_boolean_value' mangled-name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean_value'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-77' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_lookup_byte' mangled-name='fnvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_byte'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-90'/>
</function-decl>
- <function-decl name='nvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_boolean_value' mangled-name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean_value'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-87'/>
</function-decl>
- <function-decl name='fnvlist_add_byte' mangled-name='fnvlist_add_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_byte'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-81' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_lookup_boolean' mangled-name='fnvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-87'/>
</function-decl>
- <function-decl name='nvlist_add_byte' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-80'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_lookup_nvpair' mangled-name='fnvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_nvpair'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-12'/>
</function-decl>
- <function-decl name='fnvlist_add_int8' mangled-name='fnvlist_add_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int8'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-85' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_remove_nvpair' mangled-name='fnvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_remove_nvpair'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-12' name='pair'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_int8' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-14'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_remove' mangled-name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_remove'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_uint8' mangled-name='fnvlist_add_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint8'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-89' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_nvlist_array' mangled-name='fnvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvlist_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-161' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_uint8' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-80'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_string_array' mangled-name='fnvlist_add_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_string_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-255' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_int16' mangled-name='fnvlist_add_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int16'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-94' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_uint64_array' mangled-name='fnvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint64_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-156' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_int16' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-92'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_int64_array' mangled-name='fnvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int64_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-153' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_uint16' mangled-name='fnvlist_add_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint16'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-98' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_uint32_array' mangled-name='fnvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint32_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-150' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_uint16' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-13'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_int32_array' mangled-name='fnvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int32_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-147' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_int32' mangled-name='fnvlist_add_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int32'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-65' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_uint16_array' mangled-name='fnvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint16_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-144' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_int32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_int16_array' mangled-name='fnvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int16_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-141' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_uint32' mangled-name='fnvlist_add_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint32'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-66' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_uint8_array' mangled-name='fnvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint8_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-138' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_uint32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_int8_array' mangled-name='fnvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int8_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-135' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_int64' mangled-name='fnvlist_add_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int64'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-106' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_byte_array' mangled-name='fnvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_byte_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-132' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_int64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-24'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_boolean_array' mangled-name='fnvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean_array'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-128' name='val'/>
+ <parameter type-id='type-id-129' name='n'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_uint64' mangled-name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint64'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-67' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_nvpair' mangled-name='fnvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvpair'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-12' name='pair'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-29'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_nvlist' mangled-name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvlist'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-32' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
<function-decl name='fnvlist_add_string' mangled-name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_string'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-6' name='val'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-36' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_uint64' mangled-name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint64'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-28' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_nvlist' mangled-name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvlist'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-73' name='val'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_int64' mangled-name='fnvlist_add_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int64'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-112' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-218'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_uint32' mangled-name='fnvlist_add_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint32'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-27' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_nvpair' mangled-name='fnvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvpair'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-243' name='pair'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_int32' mangled-name='fnvlist_add_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int32'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-3' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_uint16' mangled-name='fnvlist_add_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint16'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-104' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_boolean_array' mangled-name='fnvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-122' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_int16' mangled-name='fnvlist_add_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int16'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-4' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-237'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_uint8' mangled-name='fnvlist_add_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint8'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-98' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_byte_array' mangled-name='fnvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_byte_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-126' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_int8' mangled-name='fnvlist_add_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int8'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-94' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-219'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_byte' mangled-name='fnvlist_add_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_byte'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-90' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_int8_array' mangled-name='fnvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int8_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-129' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_boolean_value' mangled-name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean_value'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <parameter type-id='type-id-87' name='val'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-238'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_add_boolean' mangled-name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-36' name='name'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_uint8_array' mangled-name='fnvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint8_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-132' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_num_pairs' mangled-name='fnvlist_num_pairs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_num_pairs'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <return type-id='type-id-20'/>
</function-decl>
- <function-decl name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-219'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_merge' mangled-name='fnvlist_merge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_merge'>
+ <parameter type-id='type-id-32' name='dst'/>
+ <parameter type-id='type-id-32' name='src'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_int16_array' mangled-name='fnvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int16_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-135' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_dup' mangled-name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_dup'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <return type-id='type-id-32'/>
</function-decl>
- <function-decl name='nvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-220'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_unpack' mangled-name='fnvlist_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_unpack'>
+ <parameter type-id='type-id-14' name='buf'/>
+ <parameter type-id='type-id-20' name='buflen'/>
+ <return type-id='type-id-32'/>
</function-decl>
- <function-decl name='fnvlist_add_uint16_array' mangled-name='fnvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint16_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-138' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_pack_free' mangled-name='fnvlist_pack_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_pack_free'>
+ <parameter type-id='type-id-14' name='pack'/>
+ <parameter type-id='type-id-20' name='size'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-221'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_pack' mangled-name='fnvlist_pack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_pack'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <parameter type-id='type-id-236' name='sizep'/>
+ <return type-id='type-id-14'/>
</function-decl>
- <function-decl name='fnvlist_add_int32_array' mangled-name='fnvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int32_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-141' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_size' mangled-name='fnvlist_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_size'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <return type-id='type-id-20'/>
</function-decl>
- <function-decl name='nvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-222'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fnvlist_free' mangled-name='fnvlist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_free'>
+ <parameter type-id='type-id-32' name='nvl'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_uint32_array' mangled-name='fnvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint32_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-144' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_alloc' mangled-name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_alloc'>
+ <return type-id='type-id-32'/>
</function-decl>
- <function-decl name='nvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-223'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_uint64_array' mangled-name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_int64_array' mangled-name='fnvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int64_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-147' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvlist_lookup_int64_array' mangled-name='nvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-224'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_uint32_array' mangled-name='nvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_uint64_array' mangled-name='fnvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint64_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-150' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvlist_lookup_int32_array' mangled-name='nvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-225'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_uint16_array' mangled-name='nvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_string_array' mangled-name='fnvlist_add_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_string_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-279' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvlist_lookup_int16_array' mangled-name='nvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_string_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-279'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_uint8_array' mangled-name='nvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_add_nvlist_array' mangled-name='fnvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvlist_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-156' name='val'/>
- <parameter type-id='type-id-123' name='n'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvlist_lookup_int8_array' mangled-name='nvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-235'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_byte_array' mangled-name='nvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_remove' mangled-name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_remove'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvlist_lookup_boolean_array' mangled-name='nvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_remove_nvpair' mangled-name='fnvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_remove_nvpair'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-243' name='pair'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-217'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_nvpair' mangled-name='fnvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_nvpair'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-243'/>
- </function-decl>
- <pointer-type-def type-id='type-id-217' size-in-bits='64' id='type-id-334'/>
- <function-decl name='nvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-334'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_uint32' mangled-name='nvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_boolean' mangled-name='fnvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-77'/>
+ <function-decl name='nvlist_lookup_uint16' mangled-name='nvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_uint8' mangled-name='nvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_boolean_value' mangled-name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean_value'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-77'/>
- </function-decl>
- <function-decl name='nvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-237'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_int64' mangled-name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_byte' mangled-name='fnvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_byte'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-81'/>
- </function-decl>
- <function-decl name='nvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-219'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_int32' mangled-name='nvlist_lookup_int32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_int8' mangled-name='fnvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int8'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-85'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-238'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_int16' mangled-name='nvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_int16' mangled-name='fnvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int16'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-94'/>
+ <function-decl name='nvlist_lookup_int8' mangled-name='nvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-220'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_byte' mangled-name='nvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_int32' mangled-name='fnvlist_lookup_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int32'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-65'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-222'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_boolean_value' mangled-name='nvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_int64' mangled-name='fnvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int64'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-106'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-224'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_boolean' mangled-name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint8' mangled-name='fnvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint8'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-219'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_lookup_nvpair' mangled-name='nvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint16' mangled-name='fnvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint16'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-98'/>
+ <function-decl name='nvlist_remove_nvpair' mangled-name='nvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-221'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint32' mangled-name='fnvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint32'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-66'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint64' mangled-name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint64'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-67'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-225'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_string_array' mangled-name='nvlist_add_string_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_string' mangled-name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_string'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-9'/>
+ <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-153'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_int64_array' mangled-name='nvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_nvlist' mangled-name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_nvlist'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <return type-id='type-id-73'/>
- </function-decl>
- <function-decl name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-235'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_uint32_array' mangled-name='nvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_boolean_array' mangled-name='fnvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-122'/>
- </function-decl>
- <function-decl name='nvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-239'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_int32_array' mangled-name='nvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_byte_array' mangled-name='fnvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_byte_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-126'/>
- </function-decl>
- <function-decl name='nvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-226'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_uint16_array' mangled-name='nvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_int8_array' mangled-name='fnvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int8_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-129'/>
+ <function-decl name='nvlist_add_int16_array' mangled-name='nvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-240'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_uint8_array' mangled-name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint8_array' mangled-name='fnvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint8_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-132'/>
+ <function-decl name='nvlist_add_int8_array' mangled-name='nvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-226'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_byte_array' mangled-name='nvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_int16_array' mangled-name='fnvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int16_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-135'/>
+ <function-decl name='nvlist_add_boolean_array' mangled-name='nvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-227'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_nvpair' mangled-name='nvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint16_array' mangled-name='fnvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint16_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-138'/>
+ <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-228'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_int32_array' mangled-name='fnvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int32_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-141'/>
+ <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-229'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_int64' mangled-name='nvlist_add_int64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint32_array' mangled-name='fnvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint32_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-144'/>
+ <function-decl name='nvlist_add_uint32' mangled-name='nvlist_add_uint32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-230'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_int32' mangled-name='nvlist_add_int32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_int64_array' mangled-name='fnvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int64_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-147'/>
+ <function-decl name='nvlist_add_uint16' mangled-name='nvlist_add_uint16' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-231'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_int16' mangled-name='nvlist_add_int16' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint64_array' mangled-name='fnvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint64_array'>
- <parameter type-id='type-id-73' name='nvl'/>
- <parameter type-id='type-id-6' name='name'/>
- <parameter type-id='type-id-281' name='n'/>
- <return type-id='type-id-150'/>
+ <function-decl name='nvlist_add_uint8' mangled-name='nvlist_add_uint8' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-218'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-232'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nvlist_add_int8' mangled-name='nvlist_add_int8' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_boolean_value' mangled-name='fnvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_boolean_value'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-77'/>
+ <function-decl name='nvlist_add_byte' mangled-name='nvlist_add_byte' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_byte' mangled-name='fnvpair_value_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_byte'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-81'/>
+ <function-decl name='nvlist_add_boolean_value' mangled-name='nvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_int8' mangled-name='fnvpair_value_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int8'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-85'/>
+ <function-decl name='nvlist_add_boolean' mangled-name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_int16' mangled-name='fnvpair_value_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int16'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-94'/>
+ <function-decl name='nvlist_merge' mangled-name='nvlist_merge' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_int32' mangled-name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int32'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-65'/>
+ <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_int64' mangled-name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int64'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-106'/>
+ <function-decl name='nvlist_unpack' mangled-name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_uint8' mangled-name='fnvpair_value_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint8'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-89'/>
+ <function-decl name='nvlist_pack' mangled-name='nvlist_pack' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_uint16' mangled-name='fnvpair_value_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint16'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-98'/>
+ <function-decl name='nvlist_size' mangled-name='nvlist_size' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_uint32' mangled-name='fnvpair_value_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint32'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-66'/>
+ <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_uint64' mangled-name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint64'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-67'/>
+ <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_string' mangled-name='fnvpair_value_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_string'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-9'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='assert.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <var-decl name='libspl_assert_ok' type-id='type-id-1' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
+ <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
+ <parameter type-id='type-id-36' name='file'/>
+ <parameter type-id='type-id-36' name='func'/>
+ <parameter type-id='type-id-1' name='line'/>
+ <parameter type-id='type-id-36' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='type-id-25'/>
</function-decl>
- <function-decl name='fnvpair_value_nvlist' mangled-name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_nvlist'>
- <parameter type-id='type-id-243' name='nvp'/>
- <return type-id='type-id-73'/>
+ <function-decl name='__vfprintf_chk' mangled-name='__vfprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
+ </function-decl>
+ <function-decl name='abort' mangled-name='abort' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-25'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='assert.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
- <var-decl name='libspl_assert_ok' type-id='type-id-5' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libnvpair/libnvpair.c b/sys/contrib/openzfs/lib/libnvpair/libnvpair.c
index 2e9ea1c174e9..fd43a44c1c0d 100644
--- a/sys/contrib/openzfs/lib/libnvpair/libnvpair.c
+++ b/sys/contrib/openzfs/lib/libnvpair/libnvpair.c
@@ -1,1277 +1,1276 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
*/
#include <unistd.h>
#include <string.h>
#include <libintl.h>
#include <sys/types.h>
#include <sys/inttypes.h>
-#include <sys/note.h>
#include <stdarg.h>
#include "libnvpair.h"
/*
* libnvpair - A tools library for manipulating <name, value> pairs.
*
* This library provides routines for packing and unpacking nv pairs
* for transporting data across process boundaries, transporting
* between kernel and userland, and possibly saving onto disk files.
*/
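/*
 * Editor's note -- illustrative sketch only, not part of this change:
 * a minimal pack/unpack round trip with the public nvlist API.  The
 * function name and the pair names below are invented for the example,
 * and error handling is reduced for brevity.
 */
static void
example_roundtrip(void)
{
	nvlist_t *nvl = NULL, *copy = NULL;
	char *buf = NULL;
	size_t buflen = 0;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return;
	(void) nvlist_add_string(nvl, "pool", "tank");
	(void) nvlist_add_uint64(nvl, "guid", 0x1234ULL);

	/* Serialize into a freshly allocated buffer, then reconstruct. */
	if (nvlist_pack(nvl, &buf, &buflen, NV_ENCODE_NATIVE, 0) == 0 &&
	    nvlist_unpack(buf, buflen, &copy, 0) == 0)
		nvlist_print(stdout, copy);

	free(buf);
	nvlist_free(copy);
	nvlist_free(nvl);
}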
/*
* Print control structure.
*/
#define DEFINEOP(opname, vtype) \
struct { \
int (*op)(struct nvlist_prtctl *, void *, nvlist_t *, \
const char *, vtype); \
void *arg; \
} opname
#define DEFINEARROP(opname, vtype) \
struct { \
int (*op)(struct nvlist_prtctl *, void *, nvlist_t *, \
const char *, vtype, uint_t); \
void *arg; \
} opname
struct nvlist_printops {
DEFINEOP(print_boolean, int);
DEFINEOP(print_boolean_value, boolean_t);
DEFINEOP(print_byte, uchar_t);
DEFINEOP(print_int8, int8_t);
DEFINEOP(print_uint8, uint8_t);
DEFINEOP(print_int16, int16_t);
DEFINEOP(print_uint16, uint16_t);
DEFINEOP(print_int32, int32_t);
DEFINEOP(print_uint32, uint32_t);
DEFINEOP(print_int64, int64_t);
DEFINEOP(print_uint64, uint64_t);
DEFINEOP(print_double, double);
DEFINEOP(print_string, char *);
DEFINEOP(print_hrtime, hrtime_t);
DEFINEOP(print_nvlist, nvlist_t *);
DEFINEARROP(print_boolean_array, boolean_t *);
DEFINEARROP(print_byte_array, uchar_t *);
DEFINEARROP(print_int8_array, int8_t *);
DEFINEARROP(print_uint8_array, uint8_t *);
DEFINEARROP(print_int16_array, int16_t *);
DEFINEARROP(print_uint16_array, uint16_t *);
DEFINEARROP(print_int32_array, int32_t *);
DEFINEARROP(print_uint32_array, uint32_t *);
DEFINEARROP(print_int64_array, int64_t *);
DEFINEARROP(print_uint64_array, uint64_t *);
DEFINEARROP(print_string_array, char **);
DEFINEARROP(print_nvlist_array, nvlist_t **);
};
struct nvlist_prtctl {
FILE *nvprt_fp; /* output destination */
enum nvlist_indent_mode nvprt_indent_mode; /* see above */
int nvprt_indent; /* absolute indent, or tab depth */
int nvprt_indentinc; /* indent or tab increment */
const char *nvprt_nmfmt; /* member name format, max one %s */
const char *nvprt_eomfmt; /* after member format, e.g. "\n" */
const char *nvprt_btwnarrfmt; /* between array members */
int nvprt_btwnarrfmt_nl; /* nvprt_btwnarrfmt includes newline? */
struct nvlist_printops *nvprt_dfltops;
struct nvlist_printops *nvprt_custops;
};
#define DFLTPRTOP(pctl, type) \
((pctl)->nvprt_dfltops->print_##type.op)
#define DFLTPRTOPARG(pctl, type) \
((pctl)->nvprt_dfltops->print_##type.arg)
#define CUSTPRTOP(pctl, type) \
((pctl)->nvprt_custops->print_##type.op)
#define CUSTPRTOPARG(pctl, type) \
((pctl)->nvprt_custops->print_##type.arg)
#define RENDER(pctl, type, nvl, name, val) \
{ \
int done = 0; \
if ((pctl)->nvprt_custops && CUSTPRTOP(pctl, type)) { \
done = CUSTPRTOP(pctl, type)(pctl, \
CUSTPRTOPARG(pctl, type), nvl, name, val); \
} \
if (!done) { \
(void) DFLTPRTOP(pctl, type)(pctl, \
DFLTPRTOPARG(pctl, type), nvl, name, val); \
} \
(void) fprintf(pctl->nvprt_fp, "%s", pctl->nvprt_eomfmt); \
}
#define ARENDER(pctl, type, nvl, name, arrp, count) \
{ \
int done = 0; \
if ((pctl)->nvprt_custops && CUSTPRTOP(pctl, type)) { \
done = CUSTPRTOP(pctl, type)(pctl, \
CUSTPRTOPARG(pctl, type), nvl, name, arrp, count); \
} \
if (!done) { \
(void) DFLTPRTOP(pctl, type)(pctl, \
DFLTPRTOPARG(pctl, type), nvl, name, arrp, count); \
} \
(void) fprintf(pctl->nvprt_fp, "%s", pctl->nvprt_eomfmt); \
}
static void nvlist_print_with_indent(nvlist_t *, nvlist_prtctl_t);
/*
* ======================================================================
* | |
* | Indentation |
* | |
* ======================================================================
*/
static void
indent(nvlist_prtctl_t pctl, int onemore)
{
int depth;
switch (pctl->nvprt_indent_mode) {
case NVLIST_INDENT_ABS:
(void) fprintf(pctl->nvprt_fp, "%*s",
pctl->nvprt_indent + onemore * pctl->nvprt_indentinc, "");
break;
case NVLIST_INDENT_TABBED:
depth = pctl->nvprt_indent + onemore;
while (depth-- > 0)
(void) fprintf(pctl->nvprt_fp, "\t");
}
}
/*
* ======================================================================
* | |
* | Default nvlist member rendering functions. |
* | |
* ======================================================================
*/
/*
* Generate functions to print single-valued nvlist members.
*
* type_and_variant - suffix to form function name
* vtype - C type for the member value
* ptype - C type to cast value to for printing
* vfmt - format string for pair value, e.g. "%d" or "0x%llx"
*/
#define NVLIST_PRTFUNC(type_and_variant, vtype, ptype, vfmt) \
static int \
nvprint_##type_and_variant(nvlist_prtctl_t pctl, void *private, \
nvlist_t *nvl, const char *name, vtype value) \
{ \
+ (void) private; \
+ (void) nvl; \
FILE *fp = pctl->nvprt_fp; \
- NOTE(ARGUNUSED(private)) \
- NOTE(ARGUNUSED(nvl)) \
indent(pctl, 1); \
(void) fprintf(fp, pctl->nvprt_nmfmt, name); \
(void) fprintf(fp, vfmt, (ptype)value); \
return (1); \
}
NVLIST_PRTFUNC(boolean, int, int, "%d")
NVLIST_PRTFUNC(boolean_value, boolean_t, int, "%d")
NVLIST_PRTFUNC(byte, uchar_t, uchar_t, "0x%2.2x")
NVLIST_PRTFUNC(int8, int8_t, int, "%d")
NVLIST_PRTFUNC(uint8, uint8_t, uint8_t, "0x%x")
NVLIST_PRTFUNC(int16, int16_t, int16_t, "%d")
NVLIST_PRTFUNC(uint16, uint16_t, uint16_t, "0x%x")
NVLIST_PRTFUNC(int32, int32_t, int32_t, "%d")
NVLIST_PRTFUNC(uint32, uint32_t, uint32_t, "0x%x")
NVLIST_PRTFUNC(int64, int64_t, longlong_t, "%lld")
NVLIST_PRTFUNC(uint64, uint64_t, u_longlong_t, "0x%llx")
NVLIST_PRTFUNC(double, double, double, "0x%f")
NVLIST_PRTFUNC(string, char *, char *, "%s")
NVLIST_PRTFUNC(hrtime, hrtime_t, hrtime_t, "0x%llx")
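/*
 * Editor's note -- for illustration only, not part of this change: the
 * int32 instantiation above expands to roughly the following function;
 * the other single-value printers differ only in the value type, the
 * cast used for printing, and the format string.
 *
 *	static int
 *	nvprint_int32(nvlist_prtctl_t pctl, void *private,
 *	    nvlist_t *nvl, const char *name, int32_t value)
 *	{
 *		(void) private;
 *		(void) nvl;
 *		FILE *fp = pctl->nvprt_fp;
 *		indent(pctl, 1);
 *		(void) fprintf(fp, pctl->nvprt_nmfmt, name);
 *		(void) fprintf(fp, "%d", (int32_t)value);
 *		return (1);
 *	}
 */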
/*
* Generate functions to print array-valued nvlist members.
*/
#define NVLIST_ARRPRTFUNC(type_and_variant, vtype, ptype, vfmt) \
static int \
nvaprint_##type_and_variant(nvlist_prtctl_t pctl, void *private, \
nvlist_t *nvl, const char *name, vtype *valuep, uint_t count) \
{ \
+ (void) private; \
+ (void) nvl; \
FILE *fp = pctl->nvprt_fp; \
uint_t i; \
- NOTE(ARGUNUSED(private)) \
- NOTE(ARGUNUSED(nvl)) \
for (i = 0; i < count; i++) { \
if (i == 0 || pctl->nvprt_btwnarrfmt_nl) { \
indent(pctl, 1); \
(void) fprintf(fp, pctl->nvprt_nmfmt, name); \
if (pctl->nvprt_btwnarrfmt_nl) \
(void) fprintf(fp, "[%d]: ", i); \
} \
if (i != 0) \
(void) fprintf(fp, "%s", pctl->nvprt_btwnarrfmt); \
(void) fprintf(fp, vfmt, (ptype)valuep[i]); \
} \
return (1); \
}
NVLIST_ARRPRTFUNC(boolean_array, boolean_t, boolean_t, "%d")
NVLIST_ARRPRTFUNC(byte_array, uchar_t, uchar_t, "0x%2.2x")
NVLIST_ARRPRTFUNC(int8_array, int8_t, int8_t, "%d")
NVLIST_ARRPRTFUNC(uint8_array, uint8_t, uint8_t, "0x%x")
NVLIST_ARRPRTFUNC(int16_array, int16_t, int16_t, "%d")
NVLIST_ARRPRTFUNC(uint16_array, uint16_t, uint16_t, "0x%x")
NVLIST_ARRPRTFUNC(int32_array, int32_t, int32_t, "%d")
NVLIST_ARRPRTFUNC(uint32_array, uint32_t, uint32_t, "0x%x")
NVLIST_ARRPRTFUNC(int64_array, int64_t, longlong_t, "%lld")
NVLIST_ARRPRTFUNC(uint64_array, uint64_t, u_longlong_t, "0x%llx")
NVLIST_ARRPRTFUNC(string_array, char *, char *, "%s")
/*ARGSUSED*/
static int
nvprint_nvlist(nvlist_prtctl_t pctl, void *private,
nvlist_t *nvl, const char *name, nvlist_t *value)
{
FILE *fp = pctl->nvprt_fp;
indent(pctl, 1);
(void) fprintf(fp, "%s = (embedded nvlist)\n", name);
pctl->nvprt_indent += pctl->nvprt_indentinc;
nvlist_print_with_indent(value, pctl);
pctl->nvprt_indent -= pctl->nvprt_indentinc;
indent(pctl, 1);
(void) fprintf(fp, "(end %s)\n", name);
return (1);
}
/*ARGSUSED*/
static int
nvaprint_nvlist_array(nvlist_prtctl_t pctl, void *private,
nvlist_t *nvl, const char *name, nvlist_t **valuep, uint_t count)
{
FILE *fp = pctl->nvprt_fp;
uint_t i;
indent(pctl, 1);
(void) fprintf(fp, "%s = (array of embedded nvlists)\n", name);
for (i = 0; i < count; i++) {
indent(pctl, 1);
(void) fprintf(fp, "(start %s[%d])\n", name, i);
pctl->nvprt_indent += pctl->nvprt_indentinc;
nvlist_print_with_indent(valuep[i], pctl);
pctl->nvprt_indent -= pctl->nvprt_indentinc;
indent(pctl, 1);
(void) fprintf(fp, "(end %s[%d])\n", name, i);
}
return (1);
}
/*
* ======================================================================
* | |
* | Interfaces that allow control over formatting. |
* | |
* ======================================================================
*/
void
nvlist_prtctl_setdest(nvlist_prtctl_t pctl, FILE *fp)
{
pctl->nvprt_fp = fp;
}
FILE *
nvlist_prtctl_getdest(nvlist_prtctl_t pctl)
{
return (pctl->nvprt_fp);
}
void
nvlist_prtctl_setindent(nvlist_prtctl_t pctl, enum nvlist_indent_mode mode,
int start, int inc)
{
if (mode < NVLIST_INDENT_ABS || mode > NVLIST_INDENT_TABBED)
mode = NVLIST_INDENT_TABBED;
if (start < 0)
start = 0;
if (inc < 0)
inc = 1;
pctl->nvprt_indent_mode = mode;
pctl->nvprt_indent = start;
pctl->nvprt_indentinc = inc;
}
void
nvlist_prtctl_doindent(nvlist_prtctl_t pctl, int onemore)
{
indent(pctl, onemore);
}
void
nvlist_prtctl_setfmt(nvlist_prtctl_t pctl, enum nvlist_prtctl_fmt which,
const char *fmt)
{
switch (which) {
case NVLIST_FMT_MEMBER_NAME:
if (fmt == NULL)
fmt = "%s = ";
pctl->nvprt_nmfmt = fmt;
break;
case NVLIST_FMT_MEMBER_POSTAMBLE:
if (fmt == NULL)
fmt = "\n";
pctl->nvprt_eomfmt = fmt;
break;
case NVLIST_FMT_BTWN_ARRAY:
if (fmt == NULL) {
pctl->nvprt_btwnarrfmt = " ";
pctl->nvprt_btwnarrfmt_nl = 0;
} else {
pctl->nvprt_btwnarrfmt = fmt;
pctl->nvprt_btwnarrfmt_nl = (strstr(fmt, "\n") != NULL);
}
break;
default:
break;
}
}
void
nvlist_prtctl_dofmt(nvlist_prtctl_t pctl, enum nvlist_prtctl_fmt which, ...)
{
FILE *fp = pctl->nvprt_fp;
va_list ap;
char *name;
va_start(ap, which);
switch (which) {
case NVLIST_FMT_MEMBER_NAME:
name = va_arg(ap, char *);
(void) fprintf(fp, pctl->nvprt_nmfmt, name);
break;
case NVLIST_FMT_MEMBER_POSTAMBLE:
(void) fprintf(fp, "%s", pctl->nvprt_eomfmt);
break;
case NVLIST_FMT_BTWN_ARRAY:
(void) fprintf(fp, "%s", pctl->nvprt_btwnarrfmt);
break;
default:
break;
}
va_end(ap);
}
/*
* ======================================================================
* | |
* | Interfaces to allow appointment of replacement rendering functions.|
* | |
* ======================================================================
*/
#define NVLIST_PRINTCTL_REPLACE(type, vtype) \
void \
nvlist_prtctlop_##type(nvlist_prtctl_t pctl, \
int (*func)(nvlist_prtctl_t, void *, nvlist_t *, const char *, vtype), \
void *private) \
{ \
CUSTPRTOP(pctl, type) = func; \
CUSTPRTOPARG(pctl, type) = private; \
}
NVLIST_PRINTCTL_REPLACE(boolean, int)
NVLIST_PRINTCTL_REPLACE(boolean_value, boolean_t)
NVLIST_PRINTCTL_REPLACE(byte, uchar_t)
NVLIST_PRINTCTL_REPLACE(int8, int8_t)
NVLIST_PRINTCTL_REPLACE(uint8, uint8_t)
NVLIST_PRINTCTL_REPLACE(int16, int16_t)
NVLIST_PRINTCTL_REPLACE(uint16, uint16_t)
NVLIST_PRINTCTL_REPLACE(int32, int32_t)
NVLIST_PRINTCTL_REPLACE(uint32, uint32_t)
NVLIST_PRINTCTL_REPLACE(int64, int64_t)
NVLIST_PRINTCTL_REPLACE(uint64, uint64_t)
NVLIST_PRINTCTL_REPLACE(double, double)
NVLIST_PRINTCTL_REPLACE(string, char *)
NVLIST_PRINTCTL_REPLACE(hrtime, hrtime_t)
NVLIST_PRINTCTL_REPLACE(nvlist, nvlist_t *)
#define NVLIST_PRINTCTL_AREPLACE(type, vtype) \
void \
nvlist_prtctlop_##type(nvlist_prtctl_t pctl, \
int (*func)(nvlist_prtctl_t, void *, nvlist_t *, const char *, vtype, \
uint_t), void *private) \
{ \
CUSTPRTOP(pctl, type) = func; \
CUSTPRTOPARG(pctl, type) = private; \
}
NVLIST_PRINTCTL_AREPLACE(boolean_array, boolean_t *)
NVLIST_PRINTCTL_AREPLACE(byte_array, uchar_t *)
NVLIST_PRINTCTL_AREPLACE(int8_array, int8_t *)
NVLIST_PRINTCTL_AREPLACE(uint8_array, uint8_t *)
NVLIST_PRINTCTL_AREPLACE(int16_array, int16_t *)
NVLIST_PRINTCTL_AREPLACE(uint16_array, uint16_t *)
NVLIST_PRINTCTL_AREPLACE(int32_array, int32_t *)
NVLIST_PRINTCTL_AREPLACE(uint32_array, uint32_t *)
NVLIST_PRINTCTL_AREPLACE(int64_array, int64_t *)
NVLIST_PRINTCTL_AREPLACE(uint64_array, uint64_t *)
NVLIST_PRINTCTL_AREPLACE(string_array, char **)
NVLIST_PRINTCTL_AREPLACE(nvlist_array, nvlist_t **)
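/*
 * Editor's note -- illustrative sketch only, not part of this change:
 * a caller-supplied renderer for string members.  Returning nonzero
 * tells the RENDER() dispatch above that the member was handled, so
 * the default printer is skipped; returning 0 falls back to it.  The
 * example_* names are invented for the example.
 */
static int
example_quote_string(nvlist_prtctl_t pctl, void *private,
    nvlist_t *nvl, const char *name, char *value)
{
	(void) private;
	(void) nvl;
	nvlist_prtctl_doindent(pctl, 1);
	(void) fprintf(nvlist_prtctl_getdest(pctl), "%s = \"%s\"",
	    name, value);
	return (1);
}

static void
example_install_renderer(nvlist_prtctl_t pctl)
{
	nvlist_prtctlop_string(pctl, example_quote_string, NULL);
}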
/*
* ======================================================================
* | |
* | Interfaces to manage nvlist_prtctl_t cookies. |
* | |
* ======================================================================
*/
static const struct nvlist_printops defprtops =
{
{ nvprint_boolean, NULL },
{ nvprint_boolean_value, NULL },
{ nvprint_byte, NULL },
{ nvprint_int8, NULL },
{ nvprint_uint8, NULL },
{ nvprint_int16, NULL },
{ nvprint_uint16, NULL },
{ nvprint_int32, NULL },
{ nvprint_uint32, NULL },
{ nvprint_int64, NULL },
{ nvprint_uint64, NULL },
{ nvprint_double, NULL },
{ nvprint_string, NULL },
{ nvprint_hrtime, NULL },
{ nvprint_nvlist, NULL },
{ nvaprint_boolean_array, NULL },
{ nvaprint_byte_array, NULL },
{ nvaprint_int8_array, NULL },
{ nvaprint_uint8_array, NULL },
{ nvaprint_int16_array, NULL },
{ nvaprint_uint16_array, NULL },
{ nvaprint_int32_array, NULL },
{ nvaprint_uint32_array, NULL },
{ nvaprint_int64_array, NULL },
{ nvaprint_uint64_array, NULL },
{ nvaprint_string_array, NULL },
{ nvaprint_nvlist_array, NULL },
};
static void
prtctl_defaults(FILE *fp, struct nvlist_prtctl *pctl,
struct nvlist_printops *ops)
{
pctl->nvprt_fp = fp;
pctl->nvprt_indent_mode = NVLIST_INDENT_TABBED;
pctl->nvprt_indent = 0;
pctl->nvprt_indentinc = 1;
pctl->nvprt_nmfmt = "%s = ";
pctl->nvprt_eomfmt = "\n";
pctl->nvprt_btwnarrfmt = " ";
pctl->nvprt_btwnarrfmt_nl = 0;
pctl->nvprt_dfltops = (struct nvlist_printops *)&defprtops;
pctl->nvprt_custops = ops;
}
nvlist_prtctl_t
nvlist_prtctl_alloc(void)
{
struct nvlist_prtctl *pctl;
struct nvlist_printops *ops;
if ((pctl = malloc(sizeof (*pctl))) == NULL)
return (NULL);
if ((ops = calloc(1, sizeof (*ops))) == NULL) {
free(pctl);
return (NULL);
}
prtctl_defaults(stdout, pctl, ops);
return (pctl);
}
void
nvlist_prtctl_free(nvlist_prtctl_t pctl)
{
if (pctl != NULL) {
free(pctl->nvprt_custops);
free(pctl);
}
}
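/*
 * Editor's note -- illustrative sketch only, not part of this change:
 * typical lifetime of a print-control cookie.  The function name is
 * invented; stderr and the formats are arbitrary example choices.
 */
static void
example_pretty_print(nvlist_t *nvl)
{
	nvlist_prtctl_t pctl = nvlist_prtctl_alloc();

	if (pctl == NULL)
		return;
	nvlist_prtctl_setdest(pctl, stderr);
	nvlist_prtctl_setindent(pctl, NVLIST_INDENT_ABS, 0, 2);
	nvlist_prtctl_setfmt(pctl, NVLIST_FMT_MEMBER_NAME, "%s: ");
	nvlist_prt(nvl, pctl);
	nvlist_prtctl_free(pctl);
}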
/*
* ======================================================================
* | |
* | Top-level print request interfaces. |
* | |
* ======================================================================
*/
/*
* nvlist_print - Prints elements in an event buffer
*/
static void
nvlist_print_with_indent(nvlist_t *nvl, nvlist_prtctl_t pctl)
{
FILE *fp = pctl->nvprt_fp;
char *name;
uint_t nelem;
nvpair_t *nvp;
if (nvl == NULL)
return;
indent(pctl, 0);
(void) fprintf(fp, "nvlist version: %d\n", NVL_VERSION(nvl));
nvp = nvlist_next_nvpair(nvl, NULL);
while (nvp) {
data_type_t type = nvpair_type(nvp);
name = nvpair_name(nvp);
nelem = 0;
switch (type) {
case DATA_TYPE_BOOLEAN: {
RENDER(pctl, boolean, nvl, name, 1);
break;
}
case DATA_TYPE_BOOLEAN_VALUE: {
boolean_t val;
(void) nvpair_value_boolean_value(nvp, &val);
RENDER(pctl, boolean_value, nvl, name, val);
break;
}
case DATA_TYPE_BYTE: {
uchar_t val;
(void) nvpair_value_byte(nvp, &val);
RENDER(pctl, byte, nvl, name, val);
break;
}
case DATA_TYPE_INT8: {
int8_t val;
(void) nvpair_value_int8(nvp, &val);
RENDER(pctl, int8, nvl, name, val);
break;
}
case DATA_TYPE_UINT8: {
uint8_t val;
(void) nvpair_value_uint8(nvp, &val);
RENDER(pctl, uint8, nvl, name, val);
break;
}
case DATA_TYPE_INT16: {
int16_t val;
(void) nvpair_value_int16(nvp, &val);
RENDER(pctl, int16, nvl, name, val);
break;
}
case DATA_TYPE_UINT16: {
uint16_t val;
(void) nvpair_value_uint16(nvp, &val);
RENDER(pctl, uint16, nvl, name, val);
break;
}
case DATA_TYPE_INT32: {
int32_t val;
(void) nvpair_value_int32(nvp, &val);
RENDER(pctl, int32, nvl, name, val);
break;
}
case DATA_TYPE_UINT32: {
uint32_t val;
(void) nvpair_value_uint32(nvp, &val);
RENDER(pctl, uint32, nvl, name, val);
break;
}
case DATA_TYPE_INT64: {
int64_t val;
(void) nvpair_value_int64(nvp, &val);
RENDER(pctl, int64, nvl, name, val);
break;
}
case DATA_TYPE_UINT64: {
uint64_t val;
(void) nvpair_value_uint64(nvp, &val);
RENDER(pctl, uint64, nvl, name, val);
break;
}
case DATA_TYPE_DOUBLE: {
double val;
(void) nvpair_value_double(nvp, &val);
RENDER(pctl, double, nvl, name, val);
break;
}
case DATA_TYPE_STRING: {
char *val;
(void) nvpair_value_string(nvp, &val);
RENDER(pctl, string, nvl, name, val);
break;
}
case DATA_TYPE_BOOLEAN_ARRAY: {
boolean_t *val;
(void) nvpair_value_boolean_array(nvp, &val, &nelem);
ARENDER(pctl, boolean_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_BYTE_ARRAY: {
uchar_t *val;
(void) nvpair_value_byte_array(nvp, &val, &nelem);
ARENDER(pctl, byte_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_INT8_ARRAY: {
int8_t *val;
(void) nvpair_value_int8_array(nvp, &val, &nelem);
ARENDER(pctl, int8_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_UINT8_ARRAY: {
uint8_t *val;
(void) nvpair_value_uint8_array(nvp, &val, &nelem);
ARENDER(pctl, uint8_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_INT16_ARRAY: {
int16_t *val;
(void) nvpair_value_int16_array(nvp, &val, &nelem);
ARENDER(pctl, int16_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_UINT16_ARRAY: {
uint16_t *val;
(void) nvpair_value_uint16_array(nvp, &val, &nelem);
ARENDER(pctl, uint16_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_INT32_ARRAY: {
int32_t *val;
(void) nvpair_value_int32_array(nvp, &val, &nelem);
ARENDER(pctl, int32_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_UINT32_ARRAY: {
uint32_t *val;
(void) nvpair_value_uint32_array(nvp, &val, &nelem);
ARENDER(pctl, uint32_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_INT64_ARRAY: {
int64_t *val;
(void) nvpair_value_int64_array(nvp, &val, &nelem);
ARENDER(pctl, int64_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_UINT64_ARRAY: {
uint64_t *val;
(void) nvpair_value_uint64_array(nvp, &val, &nelem);
ARENDER(pctl, uint64_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_STRING_ARRAY: {
char **val;
(void) nvpair_value_string_array(nvp, &val, &nelem);
ARENDER(pctl, string_array, nvl, name, val, nelem);
break;
}
case DATA_TYPE_HRTIME: {
hrtime_t val;
(void) nvpair_value_hrtime(nvp, &val);
RENDER(pctl, hrtime, nvl, name, val);
break;
}
case DATA_TYPE_NVLIST: {
nvlist_t *val;
(void) nvpair_value_nvlist(nvp, &val);
RENDER(pctl, nvlist, nvl, name, val);
break;
}
case DATA_TYPE_NVLIST_ARRAY: {
nvlist_t **val;
(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
ARENDER(pctl, nvlist_array, nvl, name, val, nelem);
break;
}
default:
(void) fprintf(fp, " unknown data type (%d)", type);
break;
}
nvp = nvlist_next_nvpair(nvl, nvp);
}
}
void
nvlist_print(FILE *fp, nvlist_t *nvl)
{
struct nvlist_prtctl pc;
prtctl_defaults(fp, &pc, NULL);
nvlist_print_with_indent(nvl, &pc);
}
void
nvlist_prt(nvlist_t *nvl, nvlist_prtctl_t pctl)
{
nvlist_print_with_indent(nvl, pctl);
}
#define NVP(elem, type, vtype, ptype, format) { \
vtype value; \
\
(void) nvpair_value_##type(elem, &value); \
(void) printf("%*s%s: " format "\n", indent, "", \
nvpair_name(elem), (ptype)value); \
}
#define NVPA(elem, type, vtype, ptype, format) { \
uint_t i, count; \
vtype *value; \
\
(void) nvpair_value_##type(elem, &value, &count); \
for (i = 0; i < count; i++) { \
(void) printf("%*s%s[%d]: " format "\n", indent, "", \
nvpair_name(elem), i, (ptype)value[i]); \
} \
}
/*
* Similar to nvlist_print() but handles arrays slightly differently.
*/
void
dump_nvlist(nvlist_t *list, int indent)
{
nvpair_t *elem = NULL;
boolean_t bool_value;
nvlist_t *nvlist_value;
nvlist_t **nvlist_array_value;
uint_t i, count;
if (list == NULL) {
return;
}
while ((elem = nvlist_next_nvpair(list, elem)) != NULL) {
switch (nvpair_type(elem)) {
case DATA_TYPE_BOOLEAN:
(void) printf("%*s%s\n", indent, "", nvpair_name(elem));
break;
case DATA_TYPE_BOOLEAN_VALUE:
(void) nvpair_value_boolean_value(elem, &bool_value);
(void) printf("%*s%s: %s\n", indent, "",
nvpair_name(elem), bool_value ? "true" : "false");
break;
case DATA_TYPE_BYTE:
NVP(elem, byte, uchar_t, int, "%u");
break;
case DATA_TYPE_INT8:
NVP(elem, int8, int8_t, int, "%d");
break;
case DATA_TYPE_UINT8:
NVP(elem, uint8, uint8_t, int, "%u");
break;
case DATA_TYPE_INT16:
NVP(elem, int16, int16_t, int, "%d");
break;
case DATA_TYPE_UINT16:
NVP(elem, uint16, uint16_t, int, "%u");
break;
case DATA_TYPE_INT32:
NVP(elem, int32, int32_t, long, "%ld");
break;
case DATA_TYPE_UINT32:
NVP(elem, uint32, uint32_t, ulong_t, "%lu");
break;
case DATA_TYPE_INT64:
NVP(elem, int64, int64_t, longlong_t, "%lld");
break;
case DATA_TYPE_UINT64:
NVP(elem, uint64, uint64_t, u_longlong_t, "%llu");
break;
case DATA_TYPE_STRING:
NVP(elem, string, char *, char *, "'%s'");
break;
case DATA_TYPE_BYTE_ARRAY:
NVPA(elem, byte_array, uchar_t, int, "%u");
break;
case DATA_TYPE_INT8_ARRAY:
NVPA(elem, int8_array, int8_t, int, "%d");
break;
case DATA_TYPE_UINT8_ARRAY:
NVPA(elem, uint8_array, uint8_t, int, "%u");
break;
case DATA_TYPE_INT16_ARRAY:
NVPA(elem, int16_array, int16_t, int, "%d");
break;
case DATA_TYPE_UINT16_ARRAY:
NVPA(elem, uint16_array, uint16_t, int, "%u");
break;
case DATA_TYPE_INT32_ARRAY:
NVPA(elem, int32_array, int32_t, long, "%ld");
break;
case DATA_TYPE_UINT32_ARRAY:
NVPA(elem, uint32_array, uint32_t, ulong_t, "%lu");
break;
case DATA_TYPE_INT64_ARRAY:
NVPA(elem, int64_array, int64_t, longlong_t, "%lld");
break;
case DATA_TYPE_UINT64_ARRAY:
NVPA(elem, uint64_array, uint64_t, u_longlong_t,
"%llu");
break;
case DATA_TYPE_STRING_ARRAY:
NVPA(elem, string_array, char *, char *, "'%s'");
break;
case DATA_TYPE_NVLIST:
(void) nvpair_value_nvlist(elem, &nvlist_value);
(void) printf("%*s%s:\n", indent, "",
nvpair_name(elem));
dump_nvlist(nvlist_value, indent + 4);
break;
case DATA_TYPE_NVLIST_ARRAY:
(void) nvpair_value_nvlist_array(elem,
&nvlist_array_value, &count);
for (i = 0; i < count; i++) {
(void) printf("%*s%s[%u]:\n", indent, "",
nvpair_name(elem), i);
dump_nvlist(nvlist_array_value[i], indent + 4);
}
break;
default:
(void) printf(dgettext(TEXT_DOMAIN, "bad config type "
"%d for %s\n"), nvpair_type(elem),
nvpair_name(elem));
}
}
}
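/*
 * Editor's note -- illustration only, not part of this change: for a
 * pair "vdevs" of type DATA_TYPE_UINT32_ARRAY holding {1, 2}, the two
 * printers above render roughly as follows with default settings:
 *
 *	nvlist_print():   vdevs = 0x1 0x2
 *	dump_nvlist():    vdevs[0]: 1
 *	                  vdevs[1]: 2
 */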
/*
* ======================================================================
* | |
* | Misc private interface. |
* | |
* ======================================================================
*/
/*
* Determine if string 'value' matches 'nvp' value. The 'value' string is
* converted, depending on the type of 'nvp', prior to match. For numeric
* types, a radix independent sscanf conversion of 'value' is used. If 'nvp'
* is an array type, 'ai' is the index into the array against which we are
* checking for match. If nvp is of DATA_TYPE_STRING*, the caller can pass
* in a regex_t compilation of value in 'value_regex' to trigger regular
* expression string match instead of simple strcmp().
*
* Return 1 on match, 0 on no-match, and -1 on error. If the error is
* related to value syntax error and 'ep' is non-NULL, *ep will point into
* the 'value' string at the location where the error exists.
*
* NOTE: It may be possible to move the non-regex_t version of this into
* common code used by library/kernel/boot.
*/
int
nvpair_value_match_regex(nvpair_t *nvp, int ai,
char *value, regex_t *value_regex, char **ep)
{
char *evalue;
uint_t a_len;
int sr;
if (ep)
*ep = NULL;
if ((nvp == NULL) || (value == NULL))
return (-1); /* error fail match - invalid args */
/* make sure array and index combination make sense */
if ((nvpair_type_is_array(nvp) && (ai < 0)) ||
(!nvpair_type_is_array(nvp) && (ai >= 0)))
return (-1); /* error fail match - bad index */
/* non-string values should be single 'chunk' */
if ((nvpair_type(nvp) != DATA_TYPE_STRING) &&
(nvpair_type(nvp) != DATA_TYPE_STRING_ARRAY)) {
value += strspn(value, " \t");
evalue = value + strcspn(value, " \t");
if (*evalue) {
if (ep)
*ep = evalue;
return (-1); /* error fail match - syntax */
}
}
sr = EOF;
switch (nvpair_type(nvp)) {
case DATA_TYPE_STRING: {
char *val;
/* check string value for match */
if (nvpair_value_string(nvp, &val) == 0) {
if (value_regex) {
if (regexec(value_regex, val,
(size_t)0, NULL, 0) == 0)
return (1); /* match */
} else {
if (strcmp(value, val) == 0)
return (1); /* match */
}
}
break;
}
case DATA_TYPE_STRING_ARRAY: {
char **val_array;
/* check indexed string value of array for match */
if ((nvpair_value_string_array(nvp, &val_array, &a_len) == 0) &&
(ai < a_len)) {
if (value_regex) {
if (regexec(value_regex, val_array[ai],
(size_t)0, NULL, 0) == 0)
return (1);
} else {
if (strcmp(value, val_array[ai]) == 0)
return (1);
}
}
break;
}
case DATA_TYPE_BYTE: {
uchar_t val, val_arg;
/* scanf uchar_t from value and check for match */
sr = sscanf(value, "%c", &val_arg);
if ((sr == 1) && (nvpair_value_byte(nvp, &val) == 0) &&
(val == val_arg))
return (1);
break;
}
case DATA_TYPE_BYTE_ARRAY: {
uchar_t *val_array, val_arg;
/* check indexed value of array for match */
sr = sscanf(value, "%c", &val_arg);
if ((sr == 1) &&
(nvpair_value_byte_array(nvp, &val_array, &a_len) == 0) &&
(ai < a_len) &&
(val_array[ai] == val_arg))
return (1);
break;
}
case DATA_TYPE_INT8: {
int8_t val, val_arg;
/* scanf int8_t from value and check for match */
sr = sscanf(value, "%"SCNi8, &val_arg);
if ((sr == 1) &&
(nvpair_value_int8(nvp, &val) == 0) &&
(val == val_arg))
return (1);
break;
}
case DATA_TYPE_INT8_ARRAY: {
int8_t *val_array, val_arg;
/* check indexed value of array for match */
sr = sscanf(value, "%"SCNi8, &val_arg);
if ((sr == 1) &&
(nvpair_value_int8_array(nvp, &val_array, &a_len) == 0) &&
(ai < a_len) &&
(val_array[ai] == val_arg))
return (1);
break;
}
case DATA_TYPE_UINT8: {
uint8_t val, val_arg;
/* scanf uint8_t from value and check for match */
sr = sscanf(value, "%"SCNi8, (int8_t *)&val_arg);
if ((sr == 1) &&
(nvpair_value_uint8(nvp, &val) == 0) &&
(val == val_arg))
return (1);
break;
}
case DATA_TYPE_UINT8_ARRAY: {
uint8_t *val_array, val_arg;
/* check indexed value of array for match */
sr = sscanf(value, "%"SCNi8, (int8_t *)&val_arg);
if ((sr == 1) &&
(nvpair_value_uint8_array(nvp, &val_array, &a_len) == 0) &&
(ai < a_len) &&
(val_array[ai] == val_arg))
return (1);
break;
}
case DATA_TYPE_INT16: {
int16_t val, val_arg;
/* scanf int16_t from value and check for match */
sr = sscanf(value, "%"SCNi16, &val_arg);
if ((sr == 1) &&
(nvpair_value_int16(nvp, &val) == 0) &&
(val == val_arg))
return (1);
break;
}
case DATA_TYPE_INT16_ARRAY: {
int16_t *val_array, val_arg;
/* check indexed value of array for match */
sr = sscanf(value, "%"SCNi16, &val_arg);
if ((sr == 1) &&
(nvpair_value_int16_array(nvp, &val_array, &a_len) == 0) &&
(ai < a_len) &&
(val_array[ai] == val_arg))
return (1);
break;
}
case DATA_TYPE_UINT16: {
uint16_t val, val_arg;
/* scanf uint16_t from value and check for match */
sr = sscanf(value, "%"SCNi16, (int16_t *)&val_arg);
if ((sr == 1) &&
(nvpair_value_uint16(nvp, &val) == 0) &&
(val == val_arg))
return (1);
break;
}
case DATA_TYPE_UINT16_ARRAY: {
uint16_t *val_array, val_arg;
/* check indexed value of array for match */
sr = sscanf(value, "%"SCNi16, (int16_t *)&val_arg);
if ((sr == 1) &&
(nvpair_value_uint16_array(nvp, &val_array, &a_len) == 0) &&
(ai < a_len) &&
(val_array[ai] == val_arg))
return (1);
break;
}
case DATA_TYPE_INT32: {
int32_t val, val_arg;
/* scanf int32_t from value and check for match */
sr = sscanf(value, "%"SCNi32, &val_arg);
if ((sr == 1) &&
(nvpair_value_int32(nvp, &val) == 0) &&
(val == val_arg))
return (1);
break;
}
case DATA_TYPE_INT32_ARRAY: {
int32_t *val_array, val_arg;
/* check indexed value of array for match */
sr = sscanf(value, "%"SCNi32, &val_arg);
if ((sr == 1) &&
(nvpair_value_int32_array(nvp, &val_array, &a_len) == 0) &&
(ai < a_len) &&
(val_array[ai] == val_arg))
return (1);
break;
}
case DATA_TYPE_UINT32: {
uint32_t val, val_arg;
/* scanf uint32_t from value and check for match */
sr = sscanf(value, "%"SCNi32, (int32_t *)&val_arg);
if ((sr == 1) &&
(nvpair_value_uint32(nvp, &val) == 0) &&
(val == val_arg))
return (1);
break;
}
case DATA_TYPE_UINT32_ARRAY: {
uint32_t *val_array, val_arg;
/* check indexed value of array for match */
sr = sscanf(value, "%"SCNi32, (int32_t *)&val_arg);
if ((sr == 1) &&
(nvpair_value_uint32_array(nvp, &val_array, &a_len) == 0) &&
(ai < a_len) &&
(val_array[ai] == val_arg))
return (1);
break;
}
case DATA_TYPE_INT64: {
int64_t val, val_arg;
/* scanf int64_t from value and check for match */
sr = sscanf(value, "%"SCNi64, &val_arg);
if ((sr == 1) &&
(nvpair_value_int64(nvp, &val) == 0) &&
(val == val_arg))
return (1);
break;
}
case DATA_TYPE_INT64_ARRAY: {
int64_t *val_array, val_arg;
/* check indexed value of array for match */
sr = sscanf(value, "%"SCNi64, &val_arg);
if ((sr == 1) &&
(nvpair_value_int64_array(nvp, &val_array, &a_len) == 0) &&
(ai < a_len) &&
(val_array[ai] == val_arg))
return (1);
break;
}
case DATA_TYPE_UINT64: {
uint64_t val_arg, val;
/* scanf uint64_t from value and check for match */
sr = sscanf(value, "%"SCNi64, (int64_t *)&val_arg);
if ((sr == 1) &&
(nvpair_value_uint64(nvp, &val) == 0) &&
(val == val_arg))
return (1);
break;
}
case DATA_TYPE_UINT64_ARRAY: {
uint64_t *val_array, val_arg;
/* check indexed value of array for match */
sr = sscanf(value, "%"SCNi64, (int64_t *)&val_arg);
if ((sr == 1) &&
(nvpair_value_uint64_array(nvp, &val_array, &a_len) == 0) &&
(ai < a_len) &&
(val_array[ai] == val_arg))
return (1);
break;
}
case DATA_TYPE_BOOLEAN_VALUE: {
int32_t val_arg;
boolean_t val;
/* scanf boolean_t from value and check for match */
sr = sscanf(value, "%"SCNi32, (int32_t *)&val_arg);
if ((sr == 1) &&
(nvpair_value_boolean_value(nvp, &val) == 0) &&
(val == val_arg))
return (1);
break;
}
case DATA_TYPE_BOOLEAN_ARRAY: {
boolean_t *val_array;
int32_t val_arg;
/* check indexed value of array for match */
sr = sscanf(value, "%"SCNi32, (int32_t *)&val_arg);
if ((sr == 1) &&
(nvpair_value_boolean_array(nvp,
&val_array, &a_len) == 0) &&
(ai < a_len) &&
(val_array[ai] == val_arg))
return (1);
break;
}
case DATA_TYPE_HRTIME:
case DATA_TYPE_NVLIST:
case DATA_TYPE_NVLIST_ARRAY:
case DATA_TYPE_BOOLEAN:
case DATA_TYPE_DOUBLE:
case DATA_TYPE_UNKNOWN:
default:
/*
* unknown/unsupported data type
*/
return (-1); /* error fail match */
}
/*
* If sscanf failed to convert the value, return an approximate
* pointer to the problem character via *ep.
*/
if (sr != 1) {
if (ep)
*ep = value;
return (-1); /* error fail match - syntax */
}
return (0); /* fail match */
}
int
nvpair_value_match(nvpair_t *nvp, int ai, char *value, char **ep)
{
return (nvpair_value_match_regex(nvp, ai, value, NULL, ep));
}
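For context, a minimal usage sketch of the matcher above (illustrative only, not part of this diff; the nvlist contents, the "refcount" name, and the "3" argument string are made-up examples): a caller walks an nvlist, finds the pair it cares about by name, and asks nvpair_value_match() whether that pair's value equals a string-encoded argument. A return of 1 means match, 0 means no match, and -1 means the string could not be parsed for that pair's data type (with *ep left pointing near the problem when the syntax was bad). Link against libnvpair (-lnvpair) to build it.

#include <libnvpair.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	nvlist_t *nvl;
	nvpair_t *nvp;
	char arg[] = "3";	/* string-encoded value to match against */
	char *ep = NULL;

	/* Build a toy nvlist holding a single uint64 pair. */
	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (1);
	(void) nvlist_add_uint64(nvl, "refcount", 3);

	/* Walk the list and test the "refcount" pair against "3". */
	for (nvp = nvlist_next_nvpair(nvl, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(nvl, nvp)) {
		if (strcmp(nvpair_name(nvp), "refcount") != 0)
			continue;
		/* The index argument is only used for array data types. */
		switch (nvpair_value_match(nvp, 0, arg, &ep)) {
		case 1:
			(void) printf("matched\n");
			break;
		case 0:
			(void) printf("no match\n");
			break;
		default:
			(void) printf("bad value near '%s'\n",
			    ep != NULL ? ep : arg);
			break;
		}
	}
	nvlist_free(nvl);
	return (0);
}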
diff --git a/sys/contrib/openzfs/lib/libspl/include/os/linux/sys/sysmacros.h b/sys/contrib/openzfs/lib/libspl/include/os/linux/sys/sysmacros.h
index 22fcb04b94e0..31f347c6fd5a 100644
--- a/sys/contrib/openzfs/lib/libspl/include/os/linux/sys/sysmacros.h
+++ b/sys/contrib/openzfs/lib/libspl/include/os/linux/sys/sysmacros.h
@@ -1,103 +1,101 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _LIBSPL_SYS_SYSMACROS_H
#define _LIBSPL_SYS_SYSMACROS_H
#include_next <sys/sysmacros.h>
/* common macros */
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#ifndef MAX
#define MAX(a, b) ((a) < (b) ? (b) : (a))
#endif
#ifndef ABS
#define ABS(a) ((a) < 0 ? -(a) : (a))
#endif
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof (a) / sizeof (a[0]))
#endif
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif
#define makedevice(maj, min) makedev(maj, min)
#define _sysconf(a) sysconf(a)
/*
* Compatibility macros/typedefs needed for Solaris -> Linux port
*/
#define P2ALIGN(x, align) ((x) & -(align))
#define P2CROSS(x, y, align) (((x) ^ (y)) > (align) - 1)
#define P2ROUNDUP(x, align) ((((x) - 1) | ((align) - 1)) + 1)
#define P2BOUNDARY(off, len, align) \
(((off) ^ ((off) + (len) - 1)) > (align) - 1)
#define P2PHASE(x, align) ((x) & ((align) - 1))
#define P2NPHASE(x, align) (-(x) & ((align) - 1))
#define P2NPHASE_TYPED(x, align, type) \
(-(type)(x) & ((type)(align) - 1))
#define ISP2(x) (((x) & ((x) - 1)) == 0)
#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
/*
* Typed version of the P2* macros. These macros should be used to ensure
* that the result is correctly calculated based on the data type of (x),
* which is passed in as the last argument, regardless of the data
* type of the alignment. For example, if (x) is of type uint64_t,
* and we want to round it up to a page boundary using "PAGESIZE" as
* the alignment, we can do either
* P2ROUNDUP(x, (uint64_t)PAGESIZE)
* or
* P2ROUNDUP_TYPED(x, PAGESIZE, uint64_t)
*/
#define P2ALIGN_TYPED(x, align, type) \
((type)(x) & -(type)(align))
#define P2PHASE_TYPED(x, align, type) \
((type)(x) & ((type)(align) - 1))
#define P2NPHASE_TYPED(x, align, type) \
(-(type)(x) & ((type)(align) - 1))
#define P2ROUNDUP_TYPED(x, align, type) \
((((type)(x) - 1) | ((type)(align) - 1)) + 1)
#define P2END_TYPED(x, align, type) \
(-(~(type)(x) & -(type)(align)))
#define P2PHASEUP_TYPED(x, align, phase, type) \
((type)(phase) - (((type)(phase) - (type)(x)) & -(type)(align)))
#define P2CROSS_TYPED(x, y, align, type) \
(((type)(x) ^ (type)(y)) > (type)(align) - 1)
#define P2SAMEHIGHBIT_TYPED(x, y, type) \
(((type)(x) ^ (type)(y)) < ((type)(x) & (type)(y)))
/* avoid any possibility of clashing with <stddef.h> version */
#if defined(_KERNEL) && !defined(_KMEMUSER) && !defined(offsetof)
#define offsetof(s, m) ((size_t)(&(((s *)0)->m)))
#endif
-#define _NOTE(x)
-
#endif /* _LIBSPL_SYS_SYSMACROS_H */
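As a side note on the P2* helpers retained above (an illustrative sketch, not part of the patch): they assume the alignment is a power of two, and the _TYPED variants exist so the arithmetic is carried out in an explicitly named type rather than whatever type the alignment constant happens to have, per the PAGESIZE example in the header comment. The small standalone program below copies three of the definitions verbatim to show the arithmetic; the offset and alignment values are arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Copies of the definitions from sysmacros.h above, for illustration only. */
#define	P2ALIGN(x, align)		((x) & -(align))
#define	P2PHASE(x, align)		((x) & ((align) - 1))
#define	P2ROUNDUP(x, align)		((((x) - 1) | ((align) - 1)) + 1)
#define	P2ROUNDUP_TYPED(x, align, type)	\
	((((type)(x) - 1) | ((type)(align) - 1)) + 1)

int
main(void)
{
	uint64_t x = 0x12345;	/* arbitrary offset */
	uint64_t a = 4096;	/* power-of-two alignment */

	/* Round down, round up, and the offset past the boundary. */
	(void) printf("P2ALIGN   = 0x%" PRIx64 "\n", P2ALIGN(x, a));
	(void) printf("P2ROUNDUP = 0x%" PRIx64 "\n", P2ROUNDUP(x, a));
	(void) printf("P2PHASE   = 0x%" PRIx64 "\n", P2PHASE(x, a));

	/*
	 * The _TYPED form performs the arithmetic in the named type
	 * without casting the alignment at the call site, per the
	 * PAGESIZE example in the header comment.
	 */
	(void) printf("P2ROUNDUP_TYPED = 0x%" PRIx64 "\n",
	    P2ROUNDUP_TYPED(x, 4096, uint64_t));
	return (0);
}

For x = 0x12345 and a 4096-byte alignment this should print 0x12000, 0x13000, and 0x345 for the aligned-down, rounded-up, and phase values respectively.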
diff --git a/sys/contrib/openzfs/lib/libuutil/libuutil.abi b/sys/contrib/openzfs/lib/libuutil/libuutil.abi
index 5f0c8861171f..7737b10fa44d 100644
--- a/sys/contrib/openzfs/lib/libuutil/libuutil.abi
+++ b/sys/contrib/openzfs/lib/libuutil/libuutil.abi
@@ -1,2448 +1,2087 @@
-<abi-corpus path='libuutil.so' architecture='elf-amd-x86_64' soname='libuutil.so.3'>
+<abi-corpus architecture='elf-amd-x86_64' soname='libuutil.so.3'>
<elf-needed>
- <dependency name='libatomic.so.1'/>
<dependency name='libpthread.so.0'/>
<dependency name='libc.so.6'/>
<dependency name='ld-linux-x86-64.so.2'/>
</elf-needed>
<elf-function-symbols>
+ <elf-symbol name='_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='_sol_getmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_clear_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_set_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy_nodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert_here' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_nearest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_swap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_gt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_lt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_system_hostid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getexecname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getextmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getmntany' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getzoneid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assertf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_after' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_before' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_active' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_replace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_move_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_consumer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_enter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_exit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_producer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mkdirp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='print_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spl_pagesize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcpy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_alt_exit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_lockup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_nearest_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_nearest_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_node_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_node_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_pool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_pool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_teardown' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_check_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_die' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_fatal' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_ok' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_usage' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_getpname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_insert_after' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_insert_before' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_lockup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_nearest_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_nearest_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_node_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_node_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_pool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_pool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_teardown' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_memdup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_msprintf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_panic' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_set_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_setpname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strbw' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strcaseeq' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strdup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_streq' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strerror' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strndup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_vdie' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_vwarn' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_vxdie' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_warn' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_xdie' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_zalloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='libspl_assert_ok' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_fatal_value' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_ok_value' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_usage_value' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
- <abi-instr version='1.0' address-size='64' path='uu_alloc.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil' language='LANG_C99'>
- <type-decl name='void' id='type-id-1'/>
+ <abi-instr version='1.0' address-size='64' path='uu_alloc.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
+ <type-decl name='char' size-in-bits='8' id='type-id-1'/>
<pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-2'/>
- <type-decl name='unsigned long int' size-in-bits='64' id='type-id-3'/>
- <typedef-decl name='size_t' type-id='type-id-3' filepath='/usr/lib/llvm-13/lib/clang/13.0.0/include/stddef.h' line='46' column='1' id='type-id-4'/>
- <function-decl name='uu_zalloc' mangled-name='uu_zalloc' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='33' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_zalloc'>
- <parameter type-id='type-id-4' name='n' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='33' column='1'/>
+ <qualified-type-def type-id='type-id-1' const='yes' id='type-id-3'/>
+ <pointer-type-def type-id='type-id-3' size-in-bits='64' id='type-id-4'/>
+ <function-decl name='uu_msprintf' mangled-name='uu_msprintf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_msprintf'>
+ <parameter type-id='type-id-4' name='format'/>
+ <parameter is-variadic='yes'/>
<return type-id='type-id-2'/>
</function-decl>
- <type-decl name='unsigned int' size-in-bits='32' id='type-id-5'/>
- <function-decl name='uu_set_error' filepath='../../include/libuutil_impl.h' line='42' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_free' mangled-name='uu_free' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='48' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_free'>
- <parameter type-id='type-id-2' name='p' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='48' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <type-decl name='char' size-in-bits='8' id='type-id-6'/>
- <pointer-type-def type-id='type-id-6' size-in-bits='64' id='type-id-7'/>
- <qualified-type-def type-id='type-id-6' const='yes' id='type-id-8'/>
- <pointer-type-def type-id='type-id-8' size-in-bits='64' id='type-id-9'/>
- <function-decl name='uu_strdup' mangled-name='uu_strdup' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='54' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strdup'>
- <parameter type-id='type-id-9' name='str' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='54' column='1'/>
- <return type-id='type-id-7'/>
+ <type-decl name='void' id='type-id-5'/>
+ <pointer-type-def type-id='type-id-5' size-in-bits='64' id='type-id-6'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='type-id-7'/>
+ <typedef-decl name='size_t' type-id='type-id-7' id='type-id-8'/>
+ <function-decl name='uu_memdup' mangled-name='uu_memdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_memdup'>
+ <parameter type-id='type-id-6' name='buf'/>
+ <parameter type-id='type-id-8' name='sz'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_strndup' mangled-name='uu_strndup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strndup'>
+ <parameter type-id='type-id-4' name='s'/>
+ <parameter type-id='type-id-8' name='n'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='uu_strndup' mangled-name='uu_strndup' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='74' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strndup'>
- <parameter type-id='type-id-9' name='s' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='74' column='1'/>
- <parameter type-id='type-id-4' name='n' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='74' column='1'/>
- <return type-id='type-id-7'/>
+ <function-decl name='uu_strdup' mangled-name='uu_strdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strdup'>
+ <parameter type-id='type-id-4' name='str'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='strnlen' filepath='/usr/include/string.h' line='390' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-3'/>
+ <function-decl name='uu_free' mangled-name='uu_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_free'>
+ <parameter type-id='type-id-6' name='p'/>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_memdup' mangled-name='uu_memdup' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='96' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_memdup'>
- <parameter type-id='type-id-2' name='buf' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='96' column='1'/>
- <parameter type-id='type-id-4' name='sz' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='96' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='uu_zalloc' mangled-name='uu_zalloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_zalloc'>
+ <parameter type-id='type-id-8' name='n'/>
+ <return type-id='type-id-6'/>
</function-decl>
- <function-decl name='uu_msprintf' mangled-name='uu_msprintf' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='108' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_msprintf'>
- <parameter type-id='type-id-9' name='format' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_alloc.c' line='108' column='1'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-7'/>
+ <function-decl name='__builtin___vsnprintf_chk' mangled-name='__vsnprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='__stack_chk_fail' mangled-name='__stack_chk_fail' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='__builtin_memcpy' mangled-name='memcpy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='strnlen' mangled-name='strnlen' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='strlen' mangled-name='strlen' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='free' mangled-name='free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='__builtin_calloc' mangled-name='calloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_set_error' mangled-name='uu_set_error' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='uu_avl.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil' language='LANG_C99'>
- <class-decl name='uu_avl_pool' size-in-bits='2176' is-struct='yes' visibility='default' filepath='../../include/libuutil_impl.h' line='148' column='1' id='type-id-10'>
+ <abi-instr version='1.0' address-size='64' path='uu_avl.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
+ <function-decl name='uu_avl_release' mangled-name='uu_avl_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_release'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_avl_lockup' mangled-name='uu_avl_lockup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_lockup'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <class-decl name='uu_avl' size-in-bits='960' is-struct='yes' visibility='default' id='type-id-9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uap_next' type-id='type-id-11' visibility='default' filepath='../../include/libuutil_impl.h' line='149' column='1'/>
+ <var-decl name='ua_next_enc' type-id='type-id-10' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='uap_prev' type-id='type-id-11' visibility='default' filepath='../../include/libuutil_impl.h' line='150' column='1'/>
+ <var-decl name='ua_prev_enc' type-id='type-id-10' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='uap_name' type-id='type-id-12' visibility='default' filepath='../../include/libuutil_impl.h' line='152' column='1'/>
+ <var-decl name='ua_pool' type-id='type-id-11' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='ua_parent_enc' type-id='type-id-10' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='ua_debug' type-id='type-id-12' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='264'>
+ <var-decl name='ua_index' type-id='type-id-12' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='ua_tree' type-id='type-id-13' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='uap_nodeoffset' type-id='type-id-4' visibility='default' filepath='../../include/libuutil_impl.h' line='153' column='1'/>
+ <var-decl name='ua_null_walk' type-id='type-id-14' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uintptr_t' type-id='type-id-7' id='type-id-10'/>
+ <class-decl name='uu_avl_pool' size-in-bits='2176' is-struct='yes' visibility='default' id='type-id-15'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='uap_next' type-id='type-id-11' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='uap_prev' type-id='type-id-11' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='uap_name' type-id='type-id-16' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='uap_nodeoffset' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='uap_objsize' type-id='type-id-4' visibility='default' filepath='../../include/libuutil_impl.h' line='154' column='1'/>
+ <var-decl name='uap_objsize' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='uap_cmp' type-id='type-id-13' visibility='default' filepath='../../include/libuutil_impl.h' line='155' column='1'/>
+ <var-decl name='uap_cmp' type-id='type-id-17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='uap_debug' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='156' column='1'/>
+ <var-decl name='uap_debug' type-id='type-id-12' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='840'>
- <var-decl name='uap_last_index' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='157' column='1'/>
+ <var-decl name='uap_last_index' type-id='type-id-12' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='uap_lock' type-id='type-id-15' visibility='default' filepath='../../include/libuutil_impl.h' line='158' column='1'/>
+ <var-decl name='uap_lock' type-id='type-id-18' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='uap_null_avl' type-id='type-id-16' visibility='default' filepath='../../include/libuutil_impl.h' line='159' column='1'/>
+ <var-decl name='uap_null_avl' type-id='type-id-19' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='uu_avl_pool_t' type-id='type-id-10' filepath='../../include/libuutil.h' line='259' column='1' id='type-id-17'/>
- <pointer-type-def type-id='type-id-17' size-in-bits='64' id='type-id-11'/>
- <type-decl name='__ARRAY_SIZE_TYPE__' size-in-bits='64' id='type-id-18'/>
+ <typedef-decl name='uu_avl_pool_t' type-id='type-id-15' id='type-id-20'/>
+ <pointer-type-def type-id='type-id-20' size-in-bits='64' id='type-id-11'/>
- <array-type-def dimensions='1' type-id='type-id-6' size-in-bits='512' id='type-id-12'>
- <subrange length='64' type-id='type-id-18' id='type-id-19'/>
+ <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='512' id='type-id-16'>
+ <subrange length='64' type-id='type-id-7' id='type-id-21'/>
</array-type-def>
- <type-decl name='int' size-in-bits='32' id='type-id-20'/>
- <typedef-decl name='uu_compare_fn_t' type-id='type-id-21' filepath='../../include/libuutil.h' line='131' column='1' id='type-id-22'/>
- <pointer-type-def type-id='type-id-22' size-in-bits='64' id='type-id-13'/>
- <type-decl name='unsigned char' size-in-bits='8' id='type-id-23'/>
- <typedef-decl name='__uint8_t' type-id='type-id-23' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='37' column='1' id='type-id-24'/>
- <typedef-decl name='uint8_t' type-id='type-id-24' filepath='/usr/include/x86_64-linux-gnu/bits/stdint-uintn.h' line='24' column='1' id='type-id-14'/>
- <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h' line='67' column='1' id='type-id-25'>
+ <type-decl name='int' size-in-bits='32' id='type-id-22'/>
+ <typedef-decl name='uu_compare_fn_t' type-id='type-id-23' id='type-id-24'/>
+ <pointer-type-def type-id='type-id-24' size-in-bits='64' id='type-id-17'/>
+ <type-decl name='unsigned char' size-in-bits='8' id='type-id-25'/>
+ <typedef-decl name='__uint8_t' type-id='type-id-25' id='type-id-26'/>
+ <typedef-decl name='uint8_t' type-id='type-id-26' id='type-id-12'/>
+ <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='type-id-27'>
<data-member access='private'>
- <var-decl name='__data' type-id='type-id-26' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h' line='69' column='1'/>
+ <var-decl name='__data' type-id='type-id-28' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__size' type-id='type-id-27' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h' line='70' column='1'/>
+ <var-decl name='__size' type-id='type-id-29' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__align' type-id='type-id-28' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h' line='71' column='1'/>
+ <var-decl name='__align' type-id='type-id-30' visibility='default'/>
</data-member>
</union-decl>
- <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='118' column='1' id='type-id-26'>
+ <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-28'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__lock' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='120' column='1'/>
+ <var-decl name='__lock' type-id='type-id-22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='__count' type-id='type-id-5' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='121' column='1'/>
+ <var-decl name='__count' type-id='type-id-31' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__owner' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='122' column='1'/>
+ <var-decl name='__owner' type-id='type-id-22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__nusers' type-id='type-id-5' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='124' column='1'/>
+ <var-decl name='__nusers' type-id='type-id-31' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='__kind' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='148' column='1'/>
+ <var-decl name='__kind' type-id='type-id-22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='__spins' type-id='type-id-29' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='154' column='1'/>
+ <var-decl name='__spins' type-id='type-id-32' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='176'>
- <var-decl name='__elision' type-id='type-id-29' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='154' column='1'/>
+ <var-decl name='__elision' type-id='type-id-32' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='__list' type-id='type-id-30' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='155' column='1'/>
+ <var-decl name='__list' type-id='type-id-33' visibility='default'/>
</data-member>
</class-decl>
- <type-decl name='short int' size-in-bits='16' id='type-id-29'/>
- <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='82' column='1' id='type-id-31'>
+ <type-decl name='unsigned int' size-in-bits='32' id='type-id-31'/>
+ <type-decl name='short int' size-in-bits='16' id='type-id-32'/>
+ <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-34'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__prev' type-id='type-id-32' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='84' column='1'/>
+ <var-decl name='__prev' type-id='type-id-35' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__next' type-id='type-id-32' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='85' column='1'/>
+ <var-decl name='__next' type-id='type-id-35' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-31' size-in-bits='64' id='type-id-32'/>
- <typedef-decl name='__pthread_list_t' type-id='type-id-31' filepath='/usr/include/x86_64-linux-gnu/bits/thread-shared-types.h' line='86' column='1' id='type-id-30'/>
+ <pointer-type-def type-id='type-id-34' size-in-bits='64' id='type-id-35'/>
+ <typedef-decl name='__pthread_list_t' type-id='type-id-34' id='type-id-33'/>
- <array-type-def dimensions='1' type-id='type-id-6' size-in-bits='320' id='type-id-27'>
- <subrange length='40' type-id='type-id-18' id='type-id-33'/>
+ <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='320' id='type-id-29'>
+ <subrange length='40' type-id='type-id-7' id='type-id-36'/>
</array-type-def>
- <type-decl name='long int' size-in-bits='64' id='type-id-28'/>
- <typedef-decl name='pthread_mutex_t' type-id='type-id-25' filepath='/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h' line='72' column='1' id='type-id-15'/>
- <class-decl name='uu_avl' size-in-bits='960' is-struct='yes' visibility='default' filepath='../../include/libuutil_impl.h' line='131' column='1' id='type-id-34'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ua_next_enc' type-id='type-id-35' visibility='default' filepath='../../include/libuutil_impl.h' line='132' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='ua_prev_enc' type-id='type-id-35' visibility='default' filepath='../../include/libuutil_impl.h' line='133' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='ua_pool' type-id='type-id-11' visibility='default' filepath='../../include/libuutil_impl.h' line='135' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='ua_parent_enc' type-id='type-id-35' visibility='default' filepath='../../include/libuutil_impl.h' line='136' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='ua_debug' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='137' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='264'>
- <var-decl name='ua_index' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='138' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='ua_tree' type-id='type-id-36' visibility='default' filepath='../../include/libuutil_impl.h' line='140' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='ua_null_walk' type-id='type-id-37' visibility='default' filepath='../../include/libuutil_impl.h' line='141' column='1'/>
- </data-member>
- </class-decl>
- <typedef-decl name='uintptr_t' type-id='type-id-3' filepath='/usr/include/stdint.h' line='90' column='1' id='type-id-35'/>
- <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' filepath='../../include/sys/avl_impl.h' line='146' column='1' id='type-id-36'>
+ <type-decl name='long int' size-in-bits='64' id='type-id-30'/>
+ <typedef-decl name='pthread_mutex_t' type-id='type-id-27' id='type-id-18'/>
+ <typedef-decl name='uu_avl_t' type-id='type-id-9' id='type-id-19'/>
+ <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-13'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_root' type-id='type-id-38' visibility='default' filepath='../../include/sys/avl_impl.h' line='147' column='1'/>
+ <var-decl name='avl_root' type-id='type-id-37' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='avl_compar' type-id='type-id-39' visibility='default' filepath='../../include/sys/avl_impl.h' line='148' column='1'/>
+ <var-decl name='avl_compar' type-id='type-id-38' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_offset' type-id='type-id-4' visibility='default' filepath='../../include/sys/avl_impl.h' line='149' column='1'/>
+ <var-decl name='avl_offset' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='avl_numnodes' type-id='type-id-40' visibility='default' filepath='../../include/sys/avl_impl.h' line='150' column='1'/>
+ <var-decl name='avl_numnodes' type-id='type-id-39' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='avl_size' type-id='type-id-4' visibility='default' filepath='../../include/sys/avl_impl.h' line='151' column='1'/>
+ <var-decl name='avl_pad' type-id='type-id-8' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' filepath='../../include/sys/avl_impl.h' line='90' column='1' id='type-id-41'>
+ <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-40'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_child' type-id='type-id-42' visibility='default' filepath='../../include/sys/avl_impl.h' line='91' column='1'/>
+ <var-decl name='avl_child' type-id='type-id-41' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_pcb' type-id='type-id-35' visibility='default' filepath='../../include/sys/avl_impl.h' line='92' column='1'/>
+ <var-decl name='avl_pcb' type-id='type-id-10' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-41' size-in-bits='64' id='type-id-38'/>
+ <pointer-type-def type-id='type-id-40' size-in-bits='64' id='type-id-37'/>
- <array-type-def dimensions='1' type-id='type-id-38' size-in-bits='128' id='type-id-42'>
- <subrange length='2' type-id='type-id-18' id='type-id-43'/>
+ <array-type-def dimensions='1' type-id='type-id-37' size-in-bits='128' id='type-id-41'>
+ <subrange length='2' type-id='type-id-7' id='type-id-42'/>
</array-type-def>
- <pointer-type-def type-id='type-id-44' size-in-bits='64' id='type-id-39'/>
- <typedef-decl name='ulong_t' type-id='type-id-3' filepath='../../lib/libspl/include/sys/stdtypes.h' line='34' column='1' id='type-id-40'/>
- <class-decl name='uu_avl_walk' size-in-bits='320' is-struct='yes' visibility='default' filepath='../../include/libuutil_impl.h' line='121' column='1' id='type-id-45'>
+ <pointer-type-def type-id='type-id-43' size-in-bits='64' id='type-id-38'/>
+ <typedef-decl name='ulong_t' type-id='type-id-7' id='type-id-39'/>
+ <class-decl name='uu_avl_walk' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-44'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uaw_next' type-id='type-id-46' visibility='default' filepath='../../include/libuutil_impl.h' line='122' column='1'/>
+ <var-decl name='uaw_next' type-id='type-id-45' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='uaw_prev' type-id='type-id-46' visibility='default' filepath='../../include/libuutil_impl.h' line='123' column='1'/>
+ <var-decl name='uaw_prev' type-id='type-id-45' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='uaw_avl' type-id='type-id-47' visibility='default' filepath='../../include/libuutil_impl.h' line='125' column='1'/>
+ <var-decl name='uaw_avl' type-id='type-id-46' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='uaw_next_result' type-id='type-id-2' visibility='default' filepath='../../include/libuutil_impl.h' line='126' column='1'/>
+ <var-decl name='uaw_next_result' type-id='type-id-6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='uaw_dir' type-id='type-id-48' visibility='default' filepath='../../include/libuutil_impl.h' line='127' column='1'/>
+ <var-decl name='uaw_dir' type-id='type-id-47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='264'>
- <var-decl name='uaw_robust' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='128' column='1'/>
+ <var-decl name='uaw_robust' type-id='type-id-12' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='uu_avl_walk_t' type-id='type-id-45' filepath='../../include/libuutil.h' line='270' column='1' id='type-id-37'/>
- <pointer-type-def type-id='type-id-37' size-in-bits='64' id='type-id-46'/>
- <typedef-decl name='uu_avl_t' type-id='type-id-34' filepath='../../include/libuutil.h' line='260' column='1' id='type-id-16'/>
- <pointer-type-def type-id='type-id-16' size-in-bits='64' id='type-id-47'/>
- <type-decl name='signed char' size-in-bits='8' id='type-id-49'/>
- <typedef-decl name='__int8_t' type-id='type-id-49' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='36' column='1' id='type-id-50'/>
- <typedef-decl name='int8_t' type-id='type-id-50' filepath='/usr/include/x86_64-linux-gnu/bits/stdint-intn.h' line='24' column='1' id='type-id-48'/>
- <typedef-decl name='__uint32_t' type-id='type-id-5' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='41' column='1' id='type-id-51'/>
- <typedef-decl name='uint32_t' type-id='type-id-51' filepath='/usr/include/x86_64-linux-gnu/bits/stdint-uintn.h' line='26' column='1' id='type-id-52'/>
- <function-decl name='uu_avl_pool_create' mangled-name='uu_avl_pool_create' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='66' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_pool_create'>
- <parameter type-id='type-id-9' name='name' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='66' column='1'/>
- <parameter type-id='type-id-4' name='objsize' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='66' column='1'/>
- <parameter type-id='type-id-4' name='nodeoffset' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='66' column='1'/>
- <parameter type-id='type-id-13' name='compare_func' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='67' column='1'/>
- <parameter type-id='type-id-52' name='flags' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='67' column='1'/>
- <return type-id='type-id-11'/>
- </function-decl>
- <function-decl name='uu_check_name' filepath='../../include/libuutil.h' line='107' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='uu_zalloc' filepath='../../include/libuutil.h' line='116' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='strlcpy' filepath='../../lib/libspl/include/string.h' line='37' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-3'/>
- </function-decl>
- <pointer-type-def type-id='type-id-25' size-in-bits='64' id='type-id-53'/>
- <union-decl name='__anonymous_union__' size-in-bits='32' is-anonymous='yes' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h' line='32' column='1' id='type-id-54'>
- <data-member access='private'>
- <var-decl name='__size' type-id='type-id-55' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h' line='34' column='1'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__align' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h' line='35' column='1'/>
- </data-member>
- </union-decl>
-
- <array-type-def dimensions='1' type-id='type-id-6' size-in-bits='32' id='type-id-55'>
- <subrange length='4' type-id='type-id-18' id='type-id-56'/>
-
- </array-type-def>
- <qualified-type-def type-id='type-id-54' const='yes' id='type-id-57'/>
- <pointer-type-def type-id='type-id-57' size-in-bits='64' id='type-id-58'/>
- <function-decl name='pthread_mutex_init' filepath='/usr/include/pthread.h' line='750' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-53'/>
- <parameter type-id='type-id-58'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='pthread_mutex_lock' filepath='/usr/include/pthread.h' line='763' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-53'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='pthread_mutex_unlock' filepath='/usr/include/pthread.h' line='774' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-53'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='uu_avl_pool_destroy' mangled-name='uu_avl_pool_destroy' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='114' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_pool_destroy'>
- <parameter type-id='type-id-11' name='pp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='114' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_panic' filepath='../../include/libuutil_impl.h' line='46' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_mutex_destroy' filepath='/usr/include/pthread.h' line='755' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-53'/>
- <return type-id='type-id-20'/>
+ <typedef-decl name='uu_avl_walk_t' type-id='type-id-44' id='type-id-14'/>
+ <pointer-type-def type-id='type-id-14' size-in-bits='64' id='type-id-45'/>
+ <pointer-type-def type-id='type-id-19' size-in-bits='64' id='type-id-46'/>
+ <type-decl name='signed char' size-in-bits='8' id='type-id-48'/>
+ <typedef-decl name='__int8_t' type-id='type-id-48' id='type-id-49'/>
+ <typedef-decl name='int8_t' type-id='type-id-49' id='type-id-47'/>
+ <typedef-decl name='uu_avl_index_t' type-id='type-id-10' id='type-id-50'/>
+ <function-decl name='uu_avl_nearest_prev' mangled-name='uu_avl_nearest_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_nearest_prev'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <parameter type-id='type-id-50' name='idx'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_avl_nearest_next' mangled-name='uu_avl_nearest_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_nearest_next'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <parameter type-id='type-id-50' name='idx'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_avl_insert' mangled-name='uu_avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_insert'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <parameter type-id='type-id-6' name='elem'/>
+ <parameter type-id='type-id-50' name='idx'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-50' size-in-bits='64' id='type-id-51'/>
+ <function-decl name='uu_avl_find' mangled-name='uu_avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_find'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <parameter type-id='type-id-6' name='elem'/>
+ <parameter type-id='type-id-6' name='private'/>
+ <parameter type-id='type-id-51' name='out'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-6' size-in-bits='64' id='type-id-52'/>
+ <function-decl name='uu_avl_teardown' mangled-name='uu_avl_teardown' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_teardown'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <parameter type-id='type-id-52' name='cookie'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_avl_remove' mangled-name='uu_avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_remove'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <parameter type-id='type-id-6' name='elem'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <typedef-decl name='uu_walk_fn_t' type-id='type-id-43' id='type-id-53'/>
+ <pointer-type-def type-id='type-id-53' size-in-bits='64' id='type-id-54'/>
+ <typedef-decl name='__uint32_t' type-id='type-id-31' id='type-id-55'/>
+ <typedef-decl name='uint32_t' type-id='type-id-55' id='type-id-56'/>
+ <function-decl name='uu_avl_walk' mangled-name='uu_avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <parameter type-id='type-id-54' name='func'/>
+ <parameter type-id='type-id-6' name='private'/>
+ <parameter type-id='type-id-56' name='flags'/>
+ <return type-id='type-id-22'/>
+ </function-decl>
+ <function-decl name='uu_avl_walk_end' mangled-name='uu_avl_walk_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_end'>
+ <parameter type-id='type-id-45' name='wp'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_avl_walk_next' mangled-name='uu_avl_walk_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_next'>
+ <parameter type-id='type-id-45' name='wp'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_avl_walk_start' mangled-name='uu_avl_walk_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_start'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <parameter type-id='type-id-56' name='flags'/>
+ <return type-id='type-id-45'/>
+ </function-decl>
+ <function-decl name='uu_avl_prev' mangled-name='uu_avl_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_prev'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <parameter type-id='type-id-6' name='node'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_avl_next' mangled-name='uu_avl_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_next'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <parameter type-id='type-id-6' name='node'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_avl_last' mangled-name='uu_avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_last'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_avl_first' mangled-name='uu_avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_first'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_avl_numnodes' mangled-name='uu_avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_numnodes'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <return type-id='type-id-8'/>
+ </function-decl>
+ <function-decl name='uu_avl_destroy' mangled-name='uu_avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_destroy'>
+ <parameter type-id='type-id-46' name='ap'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_avl_create' mangled-name='uu_avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_create'>
+ <parameter type-id='type-id-11' name='pp'/>
+ <parameter type-id='type-id-6' name='parent'/>
+ <parameter type-id='type-id-56' name='flags'/>
+ <return type-id='type-id-46'/>
</function-decl>
- <class-decl name='uu_avl_node' size-in-bits='192' is-struct='yes' visibility='default' filepath='../../include/libuutil.h' line='262' column='1' id='type-id-59'>
+ <class-decl name='uu_avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-57'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uan_opaque' type-id='type-id-60' visibility='default' filepath='../../include/libuutil.h' line='264' column='1'/>
+ <var-decl name='uan_opaque' type-id='type-id-58' visibility='default'/>
</data-member>
</class-decl>
- <array-type-def dimensions='1' type-id='type-id-35' size-in-bits='192' id='type-id-60'>
- <subrange length='3' type-id='type-id-18' id='type-id-61'/>
+ <array-type-def dimensions='1' type-id='type-id-10' size-in-bits='192' id='type-id-58'>
+ <subrange length='3' type-id='type-id-7' id='type-id-59'/>
</array-type-def>
- <typedef-decl name='uu_avl_node_t' type-id='type-id-59' filepath='../../include/libuutil.h' line='268' column='1' id='type-id-62'/>
- <pointer-type-def type-id='type-id-62' size-in-bits='64' id='type-id-63'/>
- <function-decl name='uu_avl_node_init' mangled-name='uu_avl_node_init' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='138' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_node_init'>
- <parameter type-id='type-id-2' name='base' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='138' column='1'/>
- <parameter type-id='type-id-63' name='np' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='138' column='1'/>
- <parameter type-id='type-id-11' name='pp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='138' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_node_fini' mangled-name='uu_avl_node_fini' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='163' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_node_fini'>
- <parameter type-id='type-id-2' name='base' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='163' column='1'/>
- <parameter type-id='type-id-63' name='np' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='163' column='1'/>
- <parameter type-id='type-id-11' name='pp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='163' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_create' mangled-name='uu_avl_create' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='211' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_create'>
- <parameter type-id='type-id-11' name='pp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='211' column='1'/>
- <parameter type-id='type-id-2' name='parent' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='211' column='1'/>
- <parameter type-id='type-id-52' name='flags' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='211' column='1'/>
- <return type-id='type-id-47'/>
- </function-decl>
- <pointer-type-def type-id='type-id-36' size-in-bits='64' id='type-id-64'/>
- <function-decl name='avl_create' filepath='../../include/sys/avl.h' line='163' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <parameter type-id='type-id-39'/>
- <parameter type-id='type-id-3'/>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_destroy' mangled-name='uu_avl_destroy' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='250' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_destroy'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='250' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_numnodes' filepath='../../include/sys/avl.h' line='281' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <return type-id='type-id-3'/>
- </function-decl>
- <function-decl name='avl_destroy' filepath='../../include/sys/avl.h' line='317' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_numnodes' mangled-name='uu_avl_numnodes' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='279' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_numnodes'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='279' column='1'/>
- <return type-id='type-id-4'/>
+ <typedef-decl name='uu_avl_node_t' type-id='type-id-57' id='type-id-60'/>
+ <pointer-type-def type-id='type-id-60' size-in-bits='64' id='type-id-61'/>
+ <function-decl name='uu_avl_node_fini' mangled-name='uu_avl_node_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_node_fini'>
+ <parameter type-id='type-id-6' name='base'/>
+ <parameter type-id='type-id-61' name='np'/>
+ <parameter type-id='type-id-11' name='pp'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_avl_node_init' mangled-name='uu_avl_node_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_node_init'>
+ <parameter type-id='type-id-6' name='base'/>
+ <parameter type-id='type-id-61' name='np'/>
+ <parameter type-id='type-id-11' name='pp'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_avl_pool_destroy' mangled-name='uu_avl_pool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_pool_destroy'>
+ <parameter type-id='type-id-11' name='pp'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_avl_pool_create' mangled-name='uu_avl_pool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_pool_create'>
+ <parameter type-id='type-id-4' name='name'/>
+ <parameter type-id='type-id-8' name='objsize'/>
+ <parameter type-id='type-id-8' name='nodeoffset'/>
+ <parameter type-id='type-id-17' name='compare_func'/>
+ <parameter type-id='type-id-56' name='flags'/>
+ <return type-id='type-id-11'/>
</function-decl>
- <function-decl name='uu_avl_first' mangled-name='uu_avl_first' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='285' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_first'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='285' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='pthread_mutex_unlock' mangled-name='pthread_mutex_unlock' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='avl_first' filepath='../../include/sys/avl.h' line='205' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <return type-id='type-id-2'/>
+ <function-decl name='pthread_mutex_lock' mangled-name='pthread_mutex_lock' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_avl_last' mangled-name='uu_avl_last' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='291' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_last'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='285' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='uu_panic' mangled-name='uu_panic' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='avl_last' filepath='../../include/sys/avl.h' line='206' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <return type-id='type-id-2'/>
+ <function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_avl_next' mangled-name='uu_avl_next' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='297' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_next'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='297' column='1'/>
- <parameter type-id='type-id-2' name='node' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='297' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='avl_walk' filepath='../../include/sys/avl_impl.h' line='158' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <parameter type-id='type-id-2'/>
- <parameter type-id='type-id-20'/>
- <return type-id='type-id-2'/>
+ <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_avl_prev' mangled-name='uu_avl_prev' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='303' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_prev'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='297' column='1'/>
- <parameter type-id='type-id-2' name='node' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='297' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_avl_walk_start' mangled-name='uu_avl_walk_start' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='364' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_start'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='364' column='1'/>
- <parameter type-id='type-id-52' name='flags' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='364' column='1'/>
- <return type-id='type-id-46'/>
+ <function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_avl_walk_next' mangled-name='uu_avl_walk_next' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='384' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_next'>
- <parameter type-id='type-id-46' name='wp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='384' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='uu_free' mangled-name='uu_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_avl_walk_end' mangled-name='uu_avl_walk_end' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='390' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_end'>
- <parameter type-id='type-id-46' name='wp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='390' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <typedef-decl name='uu_walk_fn_t' type-id='type-id-44' filepath='../../include/libuutil.h' line='155' column='1' id='type-id-65'/>
- <pointer-type-def type-id='type-id-65' size-in-bits='64' id='type-id-66'/>
- <function-decl name='uu_avl_walk' mangled-name='uu_avl_walk' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='397' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='397' column='1'/>
- <parameter type-id='type-id-66' name='func' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='397' column='1'/>
- <parameter type-id='type-id-2' name='private' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='397' column='1'/>
- <parameter type-id='type-id-52' name='flags' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='397' column='1'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='uu_avl_remove' mangled-name='uu_avl_remove' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='422' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_remove'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='422' column='1'/>
- <parameter type-id='type-id-2' name='elem' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='422' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_remove' filepath='../../include/sys/avl.h' line='260' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <parameter type-id='type-id-2'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-2' size-in-bits='64' id='type-id-67'/>
- <function-decl name='uu_avl_teardown' mangled-name='uu_avl_teardown' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='458' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_teardown'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='458' column='1'/>
- <parameter type-id='type-id-67' name='cookie' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='458' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='uu_zalloc' mangled-name='uu_zalloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='avl_destroy_nodes' filepath='../../include/sys/avl.h' line='309' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <parameter type-id='type-id-67'/>
- <return type-id='type-id-2'/>
+ <function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <typedef-decl name='uu_avl_index_t' type-id='type-id-35' filepath='../../include/libuutil.h' line='272' column='1' id='type-id-68'/>
- <pointer-type-def type-id='type-id-68' size-in-bits='64' id='type-id-69'/>
- <function-decl name='uu_avl_find' mangled-name='uu_avl_find' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='473' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_find'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='473' column='1'/>
- <parameter type-id='type-id-2' name='elem' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='473' column='1'/>
- <parameter type-id='type-id-2' name='private' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='473' column='1'/>
- <parameter type-id='type-id-69' name='out' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='473' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <pointer-type-def type-id='type-id-3' size-in-bits='64' id='type-id-70'/>
- <function-decl name='avl_find' filepath='../../include/sys/avl.h' line='175' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <parameter type-id='type-id-2'/>
- <parameter type-id='type-id-70'/>
- <return type-id='type-id-2'/>
+ <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_avl_insert' mangled-name='uu_avl_insert' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='494' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_insert'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='494' column='1'/>
- <parameter type-id='type-id-2' name='elem' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='494' column='1'/>
- <parameter type-id='type-id-68' name='idx' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='494' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_insert' filepath='../../include/sys/avl.h' line='183' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <parameter type-id='type-id-2'/>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_nearest_next' mangled-name='uu_avl_nearest_next' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='528' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_nearest_next'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='528' column='1'/>
- <parameter type-id='type-id-68' name='idx' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='528' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='avl_nearest' filepath='../../include/sys/avl.h' line='242' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-64'/>
- <parameter type-id='type-id-3'/>
- <parameter type-id='type-id-20'/>
- <return type-id='type-id-2'/>
+ <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_avl_nearest_prev' mangled-name='uu_avl_nearest_prev' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='538' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_nearest_prev'>
- <parameter type-id='type-id-47' name='ap' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='528' column='1'/>
- <parameter type-id='type-id-68' name='idx' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='528' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_avl_lockup' mangled-name='uu_avl_lockup' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='551' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_lockup'>
- <return type-id='type-id-1'/>
+ <function-decl name='pthread_mutex_destroy' mangled-name='pthread_mutex_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_avl_release' mangled-name='uu_avl_release' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_avl.c' line='562' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_release'>
- <return type-id='type-id-1'/>
+ <function-decl name='uu_check_name' mangled-name='uu_check_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-44'>
- <parameter type-id='type-id-2'/>
- <parameter type-id='type-id-2'/>
- <return type-id='type-id-20'/>
+ <function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='pthread_mutex_init' mangled-name='pthread_mutex_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='type-id-43'>
+ <parameter type-id='type-id-6'/>
+ <parameter type-id='type-id-6'/>
+ <return type-id='type-id-22'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-21'>
- <parameter type-id='type-id-2'/>
- <parameter type-id='type-id-2'/>
- <parameter type-id='type-id-2'/>
- <return type-id='type-id-20'/>
+ <function-type size-in-bits='64' id='type-id-23'>
+ <parameter type-id='type-id-6'/>
+ <parameter type-id='type-id-6'/>
+ <parameter type-id='type-id-6'/>
+ <return type-id='type-id-22'/>
</function-type>
</abi-instr>
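The abi-instr above records the uu_avl entry points exported by libuutil (pool creation, node init, find/insert, walking, and teardown). For orientation only, and not as part of the diff, the sketch below shows how a consumer such as zfs(8) typically drives that API. The structure, its member names, and the "entry_pool" label are hypothetical; the calls and the UU_DEFAULT flag are assumed to match libuutil.h as dumped here and should be checked against the header.

/*
 * Illustrative sketch of the uu_avl API recorded in the ABI dump above.
 * "struct entry", "e_node", "e_name", and "entry_pool" are made-up names.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <libuutil.h>

struct entry {
	uu_avl_node_t	e_node;		/* linkage managed by the pool */
	const char	*e_name;	/* sort key */
};

static int
entry_compare(const void *la, const void *ra, void *unused)
{
	(void) unused;
	int cmp = strcmp(((const struct entry *)la)->e_name,
	    ((const struct entry *)ra)->e_name);
	return (cmp < 0 ? -1 : cmp > 0 ? 1 : 0);
}

int
main(void)
{
	uu_avl_pool_t *pool = uu_avl_pool_create("entry_pool",
	    sizeof (struct entry), offsetof(struct entry, e_node),
	    entry_compare, UU_DEFAULT);
	uu_avl_t *avl = uu_avl_create(pool, NULL, UU_DEFAULT);
	struct entry *e = malloc(sizeof (*e));

	if (pool == NULL || avl == NULL || e == NULL)
		return (1);

	e->e_name = "tank/home";
	uu_avl_node_init(e, &e->e_node, pool);

	/* Insert only if an equal element is not already present. */
	uu_avl_index_t idx;
	if (uu_avl_find(avl, e, NULL, &idx) == NULL)
		uu_avl_insert(avl, e, idx);

	/* In-order traversal. */
	for (struct entry *c = uu_avl_first(avl); c != NULL;
	    c = uu_avl_next(avl, c))
		(void) printf("%s\n", c->e_name);

	/* Detach every node, then destroy the tree and the pool. */
	void *cookie = NULL;
	while ((e = uu_avl_teardown(avl, &cookie)) != NULL)
		free(e);
	uu_avl_destroy(avl);
	uu_avl_pool_destroy(pool);
	return (0);
}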
- <abi-instr version='1.0' address-size='64' path='uu_ident.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil' language='LANG_C99'>
- <typedef-decl name='uint_t' type-id='type-id-5' filepath='../../lib/libspl/include/sys/stdtypes.h' line='33' column='1' id='type-id-71'/>
- <function-decl name='uu_check_name' mangled-name='uu_check_name' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_ident.c' line='93' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_check_name'>
- <parameter type-id='type-id-9' name='name' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_ident.c' line='93' column='1'/>
- <parameter type-id='type-id-71' name='flags' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_ident.c' line='93' column='1'/>
- <return type-id='type-id-20'/>
+ <abi-instr version='1.0' address-size='64' path='uu_ident.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
+ <typedef-decl name='uint_t' type-id='type-id-31' id='type-id-62'/>
+ <function-decl name='uu_check_name' mangled-name='uu_check_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_check_name'>
+ <parameter type-id='type-id-4' name='name'/>
+ <parameter type-id='type-id-62' name='flags'/>
+ <return type-id='type-id-22'/>
+ </function-decl>
+ <function-decl name='strchr' mangled-name='strchr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
</abi-instr>
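uu_ident.c contributes only the identifier checker declared above. A minimal, hypothetical usage sketch follows; it assumes uu_check_name() returns 0 for a valid name and that the UU_NAME_PATH flag is defined in libuutil.h, which should be verified against the header.

/* Minimal sketch of uu_check_name(); not part of the diff. */
#include <stdio.h>
#include <libuutil.h>

int
main(void)
{
	const char *name = "tank/home";

	if (uu_check_name(name, UU_NAME_PATH) == 0)
		(void) printf("'%s' is a valid identifier\n", name);
	else
		(void) printf("'%s' was rejected\n", name);
	return (0);
}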
- <abi-instr version='1.0' address-size='64' path='uu_list.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil' language='LANG_C99'>
- <class-decl name='uu_list_pool' size-in-bits='2112' is-struct='yes' visibility='default' filepath='../../include/libuutil_impl.h' line='102' column='1' id='type-id-72'>
+ <abi-instr version='1.0' address-size='64' path='uu_list.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
+ <function-decl name='uu_list_release' mangled-name='uu_list_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_release'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_list_lockup' mangled-name='uu_list_lockup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_lockup'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <class-decl name='uu_list' size-in-bits='896' is-struct='yes' visibility='default' id='type-id-63'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ulp_next' type-id='type-id-73' visibility='default' filepath='../../include/libuutil_impl.h' line='103' column='1'/>
+ <var-decl name='ul_next_enc' type-id='type-id-10' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='ulp_prev' type-id='type-id-73' visibility='default' filepath='../../include/libuutil_impl.h' line='104' column='1'/>
+ <var-decl name='ul_prev_enc' type-id='type-id-10' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='ulp_name' type-id='type-id-12' visibility='default' filepath='../../include/libuutil_impl.h' line='106' column='1'/>
+ <var-decl name='ul_pool' type-id='type-id-64' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='ulp_nodeoffset' type-id='type-id-4' visibility='default' filepath='../../include/libuutil_impl.h' line='107' column='1'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='ul_parent_enc' type-id='type-id-10' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='ulp_objsize' type-id='type-id-4' visibility='default' filepath='../../include/libuutil_impl.h' line='108' column='1'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='ul_offset' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='ulp_cmp' type-id='type-id-13' visibility='default' filepath='../../include/libuutil_impl.h' line='109' column='1'/>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='ul_numnodes' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='ulp_debug' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='110' column='1'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='ul_debug' type-id='type-id-12' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='840'>
- <var-decl name='ulp_last_index' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='111' column='1'/>
+ <data-member access='public' layout-offset-in-bits='392'>
+ <var-decl name='ul_sorted' type-id='type-id-12' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='ulp_lock' type-id='type-id-15' visibility='default' filepath='../../include/libuutil_impl.h' line='112' column='1'/>
+ <data-member access='public' layout-offset-in-bits='400'>
+ <var-decl name='ul_index' type-id='type-id-12' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='ulp_null_list' type-id='type-id-74' visibility='default' filepath='../../include/libuutil_impl.h' line='113' column='1'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='ul_null_node' type-id='type-id-65' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='ul_null_walk' type-id='type-id-66' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='uu_list_pool_t' type-id='type-id-72' filepath='../../include/libuutil.h' line='160' column='1' id='type-id-75'/>
- <pointer-type-def type-id='type-id-75' size-in-bits='64' id='type-id-73'/>
- <class-decl name='uu_list' size-in-bits='896' is-struct='yes' visibility='default' filepath='../../include/libuutil_impl.h' line='82' column='1' id='type-id-76'>
+ <class-decl name='uu_list_pool' size-in-bits='2112' is-struct='yes' visibility='default' id='type-id-67'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ul_next_enc' type-id='type-id-35' visibility='default' filepath='../../include/libuutil_impl.h' line='83' column='1'/>
+ <var-decl name='ulp_next' type-id='type-id-64' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='ul_prev_enc' type-id='type-id-35' visibility='default' filepath='../../include/libuutil_impl.h' line='84' column='1'/>
+ <var-decl name='ulp_prev' type-id='type-id-64' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='ul_pool' type-id='type-id-73' visibility='default' filepath='../../include/libuutil_impl.h' line='86' column='1'/>
+ <var-decl name='ulp_name' type-id='type-id-16' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='ul_parent_enc' type-id='type-id-35' visibility='default' filepath='../../include/libuutil_impl.h' line='87' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='ul_offset' type-id='type-id-4' visibility='default' filepath='../../include/libuutil_impl.h' line='88' column='1'/>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='ulp_nodeoffset' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='ul_numnodes' type-id='type-id-4' visibility='default' filepath='../../include/libuutil_impl.h' line='89' column='1'/>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='ulp_objsize' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='ul_debug' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='90' column='1'/>
+ <data-member access='public' layout-offset-in-bits='768'>
+ <var-decl name='ulp_cmp' type-id='type-id-17' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='392'>
- <var-decl name='ul_sorted' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='91' column='1'/>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='ulp_debug' type-id='type-id-12' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='400'>
- <var-decl name='ul_index' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='92' column='1'/>
+ <data-member access='public' layout-offset-in-bits='840'>
+ <var-decl name='ulp_last_index' type-id='type-id-12' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='ul_null_node' type-id='type-id-77' visibility='default' filepath='../../include/libuutil_impl.h' line='94' column='1'/>
+ <data-member access='public' layout-offset-in-bits='896'>
+ <var-decl name='ulp_lock' type-id='type-id-18' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='ul_null_walk' type-id='type-id-78' visibility='default' filepath='../../include/libuutil_impl.h' line='95' column='1'/>
+ <data-member access='public' layout-offset-in-bits='1216'>
+ <var-decl name='ulp_null_list' type-id='type-id-68' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='uu_list_node_impl' size-in-bits='128' is-struct='yes' visibility='default' filepath='../../include/libuutil_impl.h' line='67' column='1' id='type-id-79'>
+ <typedef-decl name='uu_list_pool_t' type-id='type-id-67' id='type-id-69'/>
+ <pointer-type-def type-id='type-id-69' size-in-bits='64' id='type-id-64'/>
+ <typedef-decl name='uu_list_t' type-id='type-id-63' id='type-id-68'/>
+ <class-decl name='uu_list_node_impl' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-70'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uln_next' type-id='type-id-80' visibility='default' filepath='../../include/libuutil_impl.h' line='68' column='1'/>
+ <var-decl name='uln_next' type-id='type-id-71' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='uln_prev' type-id='type-id-80' visibility='default' filepath='../../include/libuutil_impl.h' line='69' column='1'/>
+ <var-decl name='uln_prev' type-id='type-id-71' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-79' size-in-bits='64' id='type-id-80'/>
- <typedef-decl name='uu_list_node_impl_t' type-id='type-id-79' filepath='../../include/libuutil_impl.h' line='70' column='1' id='type-id-77'/>
- <class-decl name='uu_list_walk' size-in-bits='320' is-struct='yes' visibility='default' filepath='../../include/libuutil_impl.h' line='72' column='1' id='type-id-81'>
+ <pointer-type-def type-id='type-id-70' size-in-bits='64' id='type-id-71'/>
+ <typedef-decl name='uu_list_node_impl_t' type-id='type-id-70' id='type-id-65'/>
+ <class-decl name='uu_list_walk' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-72'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ulw_next' type-id='type-id-82' visibility='default' filepath='../../include/libuutil_impl.h' line='73' column='1'/>
+ <var-decl name='ulw_next' type-id='type-id-73' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='ulw_prev' type-id='type-id-82' visibility='default' filepath='../../include/libuutil_impl.h' line='74' column='1'/>
+ <var-decl name='ulw_prev' type-id='type-id-73' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='ulw_list' type-id='type-id-83' visibility='default' filepath='../../include/libuutil_impl.h' line='76' column='1'/>
+ <var-decl name='ulw_list' type-id='type-id-74' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='ulw_dir' type-id='type-id-48' visibility='default' filepath='../../include/libuutil_impl.h' line='77' column='1'/>
+ <var-decl name='ulw_dir' type-id='type-id-47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='200'>
- <var-decl name='ulw_robust' type-id='type-id-14' visibility='default' filepath='../../include/libuutil_impl.h' line='78' column='1'/>
+ <var-decl name='ulw_robust' type-id='type-id-12' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='ulw_next_result' type-id='type-id-84' visibility='default' filepath='../../include/libuutil_impl.h' line='79' column='1'/>
+ <var-decl name='ulw_next_result' type-id='type-id-75' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='uu_list_walk_t' type-id='type-id-81' filepath='../../include/libuutil.h' line='167' column='1' id='type-id-78'/>
- <pointer-type-def type-id='type-id-78' size-in-bits='64' id='type-id-82'/>
- <typedef-decl name='uu_list_t' type-id='type-id-76' filepath='../../include/libuutil.h' line='161' column='1' id='type-id-74'/>
- <pointer-type-def type-id='type-id-74' size-in-bits='64' id='type-id-83'/>
- <pointer-type-def type-id='type-id-77' size-in-bits='64' id='type-id-84'/>
- <function-decl name='uu_list_pool_create' mangled-name='uu_list_pool_create' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='63' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_pool_create'>
- <parameter type-id='type-id-9' name='name' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='63' column='1'/>
- <parameter type-id='type-id-4' name='objsize' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='63' column='1'/>
- <parameter type-id='type-id-4' name='nodeoffset' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='64' column='1'/>
- <parameter type-id='type-id-13' name='compare_func' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='64' column='1'/>
- <parameter type-id='type-id-52' name='flags' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='64' column='1'/>
+ <typedef-decl name='uu_list_walk_t' type-id='type-id-72' id='type-id-66'/>
+ <pointer-type-def type-id='type-id-66' size-in-bits='64' id='type-id-73'/>
+ <pointer-type-def type-id='type-id-68' size-in-bits='64' id='type-id-74'/>
+ <pointer-type-def type-id='type-id-65' size-in-bits='64' id='type-id-75'/>
+ <function-decl name='uu_list_prev' mangled-name='uu_list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_prev'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-6' name='elem'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_list_next' mangled-name='uu_list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_next'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-6' name='elem'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_list_last' mangled-name='uu_list_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_last'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_list_first' mangled-name='uu_list_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_first'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_list_numnodes' mangled-name='uu_list_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_numnodes'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <return type-id='type-id-8'/>
+ </function-decl>
+ <function-decl name='uu_list_insert_after' mangled-name='uu_list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert_after'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-6' name='target'/>
+ <parameter type-id='type-id-6' name='elem'/>
+ <return type-id='type-id-22'/>
+ </function-decl>
+ <function-decl name='uu_list_insert_before' mangled-name='uu_list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert_before'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-6' name='target'/>
+ <parameter type-id='type-id-6' name='elem'/>
+ <return type-id='type-id-22'/>
+ </function-decl>
+ <function-decl name='uu_list_teardown' mangled-name='uu_list_teardown' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_teardown'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-52' name='cookie'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_list_remove' mangled-name='uu_list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_remove'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-6' name='elem'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_list_walk' mangled-name='uu_list_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-54' name='func'/>
+ <parameter type-id='type-id-6' name='private'/>
+ <parameter type-id='type-id-56' name='flags'/>
+ <return type-id='type-id-22'/>
+ </function-decl>
+ <function-decl name='uu_list_walk_end' mangled-name='uu_list_walk_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_end'>
+ <parameter type-id='type-id-73' name='wp'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_list_walk_next' mangled-name='uu_list_walk_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_next'>
+ <parameter type-id='type-id-73' name='wp'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_list_walk_start' mangled-name='uu_list_walk_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_start'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-56' name='flags'/>
<return type-id='type-id-73'/>
</function-decl>
- <function-decl name='uu_list_pool_destroy' mangled-name='uu_list_pool_destroy' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='110' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_pool_destroy'>
- <parameter type-id='type-id-73' name='pp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='110' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='uu_list_node' size-in-bits='128' is-struct='yes' visibility='default' filepath='../../include/libuutil.h' line='163' column='1' id='type-id-85'>
+ <typedef-decl name='uu_list_index_t' type-id='type-id-10' id='type-id-76'/>
+ <function-decl name='uu_list_nearest_prev' mangled-name='uu_list_nearest_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_nearest_prev'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-76' name='idx'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_list_nearest_next' mangled-name='uu_list_nearest_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_nearest_next'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-76' name='idx'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-76' size-in-bits='64' id='type-id-77'/>
+ <function-decl name='uu_list_find' mangled-name='uu_list_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_find'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-6' name='elem'/>
+ <parameter type-id='type-id-6' name='private'/>
+ <parameter type-id='type-id-77' name='out'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='uu_list_insert' mangled-name='uu_list_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <parameter type-id='type-id-6' name='elem'/>
+ <parameter type-id='type-id-76' name='idx'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_list_destroy' mangled-name='uu_list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_destroy'>
+ <parameter type-id='type-id-74' name='lp'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_list_create' mangled-name='uu_list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_create'>
+ <parameter type-id='type-id-64' name='pp'/>
+ <parameter type-id='type-id-6' name='parent'/>
+ <parameter type-id='type-id-56' name='flags'/>
+ <return type-id='type-id-74'/>
+ </function-decl>
+ <class-decl name='uu_list_node' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-78'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uln_opaque' type-id='type-id-86' visibility='default' filepath='../../include/libuutil.h' line='164' column='1'/>
+ <var-decl name='uln_opaque' type-id='type-id-79' visibility='default'/>
</data-member>
</class-decl>
- <array-type-def dimensions='1' type-id='type-id-35' size-in-bits='128' id='type-id-86'>
- <subrange length='2' type-id='type-id-18' id='type-id-43'/>
+ <array-type-def dimensions='1' type-id='type-id-10' size-in-bits='128' id='type-id-79'>
+ <subrange length='2' type-id='type-id-7' id='type-id-42'/>
</array-type-def>
- <typedef-decl name='uu_list_node_t' type-id='type-id-85' filepath='../../include/libuutil.h' line='165' column='1' id='type-id-87'/>
- <pointer-type-def type-id='type-id-87' size-in-bits='64' id='type-id-88'/>
- <function-decl name='uu_list_node_init' mangled-name='uu_list_node_init' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='133' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_node_init'>
- <parameter type-id='type-id-2' name='base' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='133' column='1'/>
- <parameter type-id='type-id-88' name='np_arg' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='133' column='1'/>
- <parameter type-id='type-id-73' name='pp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='133' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_list_node_fini' mangled-name='uu_list_node_fini' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='157' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_node_fini'>
- <parameter type-id='type-id-2' name='base' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='157' column='1'/>
- <parameter type-id='type-id-88' name='np_arg' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='157' column='1'/>
- <parameter type-id='type-id-73' name='pp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='157' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_list_create' mangled-name='uu_list_create' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='180' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_create'>
- <parameter type-id='type-id-73' name='pp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='180' column='1'/>
- <parameter type-id='type-id-2' name='parent' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='180' column='1'/>
- <parameter type-id='type-id-52' name='flags' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='180' column='1'/>
- <return type-id='type-id-83'/>
- </function-decl>
- <function-decl name='uu_list_destroy' mangled-name='uu_list_destroy' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='231' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_destroy'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='231' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <typedef-decl name='uu_list_index_t' type-id='type-id-35' filepath='../../include/libuutil.h' line='169' column='1' id='type-id-89'/>
- <function-decl name='uu_list_insert' mangled-name='uu_list_insert' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='292' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='292' column='1'/>
- <parameter type-id='type-id-2' name='elem' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='292' column='1'/>
- <parameter type-id='type-id-89' name='idx' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='292' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-89' size-in-bits='64' id='type-id-90'/>
- <function-decl name='uu_list_find' mangled-name='uu_list_find' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='315' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_find'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='315' column='1'/>
- <parameter type-id='type-id-2' name='elem' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='315' column='1'/>
- <parameter type-id='type-id-2' name='private' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='315' column='1'/>
- <parameter type-id='type-id-90' name='out' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='315' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='uu_list_nearest_next' mangled-name='uu_list_nearest_next' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='348' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_nearest_next'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='348' column='1'/>
- <parameter type-id='type-id-89' name='idx' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='348' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='uu_list_nearest_prev' mangled-name='uu_list_nearest_prev' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='373' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_nearest_prev'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='348' column='1'/>
- <parameter type-id='type-id-89' name='idx' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='348' column='1'/>
- <return type-id='type-id-2'/>
+ <typedef-decl name='uu_list_node_t' type-id='type-id-78' id='type-id-80'/>
+ <pointer-type-def type-id='type-id-80' size-in-bits='64' id='type-id-81'/>
+ <function-decl name='uu_list_node_fini' mangled-name='uu_list_node_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_node_fini'>
+ <parameter type-id='type-id-6' name='base'/>
+ <parameter type-id='type-id-81' name='np_arg'/>
+ <parameter type-id='type-id-64' name='pp'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_list_node_init' mangled-name='uu_list_node_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_node_init'>
+ <parameter type-id='type-id-6' name='base'/>
+ <parameter type-id='type-id-81' name='np_arg'/>
+ <parameter type-id='type-id-64' name='pp'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_list_pool_destroy' mangled-name='uu_list_pool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_pool_destroy'>
+ <parameter type-id='type-id-64' name='pp'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_list_pool_create' mangled-name='uu_list_pool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_pool_create'>
+ <parameter type-id='type-id-4' name='name'/>
+ <parameter type-id='type-id-8' name='objsize'/>
+ <parameter type-id='type-id-8' name='nodeoffset'/>
+ <parameter type-id='type-id-17' name='compare_func'/>
+ <parameter type-id='type-id-56' name='flags'/>
+ <return type-id='type-id-64'/>
</function-decl>
- <function-decl name='uu_list_walk_start' mangled-name='uu_list_walk_start' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='456' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_start'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='456' column='1'/>
- <parameter type-id='type-id-52' name='flags' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='456' column='1'/>
- <return type-id='type-id-82'/>
- </function-decl>
- <function-decl name='uu_list_walk_next' mangled-name='uu_list_walk_next' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='476' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_next'>
- <parameter type-id='type-id-82' name='wp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='476' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='uu_list_walk_end' mangled-name='uu_list_walk_end' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='488' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_end'>
- <parameter type-id='type-id-82' name='wp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='488' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_list_walk' mangled-name='uu_list_walk' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='495' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='495' column='1'/>
- <parameter type-id='type-id-66' name='func' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='495' column='1'/>
- <parameter type-id='type-id-2' name='private' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='495' column='1'/>
- <parameter type-id='type-id-52' name='flags' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='495' column='1'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='uu_list_remove' mangled-name='uu_list_remove' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='540' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_remove'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='540' column='1'/>
- <parameter type-id='type-id-2' name='elem' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='540' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_list_teardown' mangled-name='uu_list_teardown' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='580' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_teardown'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='580' column='1'/>
- <parameter type-id='type-id-67' name='cookie' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='580' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='uu_list_first' mangled-name='uu_list_first' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='656' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_first'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='656' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='uu_list_insert_before' mangled-name='uu_list_insert_before' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='598' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert_before'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='598' column='1'/>
- <parameter type-id='type-id-2' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='598' column='1'/>
- <parameter type-id='type-id-2' name='elem' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='598' column='1'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='uu_list_insert_after' mangled-name='uu_list_insert_after' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='624' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert_after'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='598' column='1'/>
- <parameter type-id='type-id-2' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='598' column='1'/>
- <parameter type-id='type-id-2' name='elem' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='598' column='1'/>
- <return type-id='type-id-20'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='uu_misc.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
+ <function-decl name='uu_panic' mangled-name='uu_panic' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_panic'>
+ <parameter type-id='type-id-4' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_list_numnodes' mangled-name='uu_list_numnodes' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='650' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_numnodes'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='650' column='1'/>
+ <function-decl name='uu_strerror' mangled-name='uu_strerror' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strerror'>
+ <parameter type-id='type-id-56' name='code'/>
<return type-id='type-id-4'/>
</function-decl>
- <function-decl name='uu_list_last' mangled-name='uu_list_last' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='665' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_last'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='656' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='uu_list_next' mangled-name='uu_list_next' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='674' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_next'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='674' column='1'/>
- <parameter type-id='type-id-2' name='elem' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='674' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='uu_error' mangled-name='uu_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_error'>
+ <return type-id='type-id-56'/>
</function-decl>
- <function-decl name='uu_list_prev' mangled-name='uu_list_prev' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='685' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_prev'>
- <parameter type-id='type-id-83' name='lp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='674' column='1'/>
- <parameter type-id='type-id-2' name='elem' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='674' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='uu_set_error' mangled-name='uu_set_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_set_error'>
+ <parameter type-id='type-id-62' name='code'/>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_list_lockup' mangled-name='uu_list_lockup' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='699' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_lockup'>
- <return type-id='type-id-1'/>
+ <function-decl name='pthread_atfork' mangled-name='pthread_atfork' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_list_release' mangled-name='uu_list_release' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_list.c' line='710' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_release'>
- <return type-id='type-id-1'/>
+ <function-decl name='__vfprintf_chk' mangled-name='__vfprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='uu_misc.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil' language='LANG_C99'>
- <function-decl name='uu_set_error' mangled-name='uu_set_error' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_misc.c' line='73' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_set_error'>
- <parameter type-id='type-id-71' name='code' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_misc.c' line='73' column='1'/>
- <return type-id='type-id-1'/>
+ <function-decl name='pthread_self' mangled-name='pthread_self' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <pointer-type-def type-id='type-id-5' size-in-bits='64' id='type-id-91'/>
- <pointer-type-def type-id='type-id-92' size-in-bits='64' id='type-id-93'/>
- <function-decl name='pthread_key_create' filepath='/usr/include/pthread.h' line='1112' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-93'/>
- <return type-id='type-id-20'/>
+ <function-decl name='pause' mangled-name='pause' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='pthread_setspecific' filepath='/usr/include/pthread.h' line='1123' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-5'/>
- <parameter type-id='type-id-2'/>
- <return type-id='type-id-20'/>
+ <function-decl name='abort' mangled-name='abort' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_error' mangled-name='uu_error' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_misc.c' line='102' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_error'>
- <return type-id='type-id-52'/>
+ <function-decl name='dcgettext' mangled-name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='pthread_getspecific' filepath='/usr/include/pthread.h' line='1120' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-2'/>
+ <function-decl name='__errno_location' mangled-name='__errno_location' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_strerror' mangled-name='uu_strerror' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_misc.c' line='118' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strerror'>
- <parameter type-id='type-id-52' name='code' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_misc.c' line='118' column='1'/>
- <return type-id='type-id-9'/>
+ <function-decl name='pthread_getspecific' mangled-name='pthread_getspecific' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='dcgettext' filepath='/usr/include/libintl.h' line='51' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-20'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='pthread_self' filepath='/usr/include/pthread.h' line='276' column='1' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-3'/>
+ <function-decl name='pthread_setspecific' mangled-name='pthread_setspecific' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='pause' filepath='/usr/include/unistd.h' line='469' column='1' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-20'/>
+ <function-decl name='pthread_key_create' mangled-name='pthread_key_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <pointer-type-def type-id='type-id-94' size-in-bits='64' id='type-id-95'/>
- <function-decl name='pthread_atfork' filepath='/usr/include/pthread.h' line='1146' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-95'/>
- <parameter type-id='type-id-95'/>
- <parameter type-id='type-id-95'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-94'>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-92'>
- <parameter type-id='type-id-2'/>
- <return type-id='type-id-1'/>
- </function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='uu_pname.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil' language='LANG_C99'>
- <var-decl name='uu_exit_ok_value' type-id='type-id-20' mangled-name='uu_exit_ok_value' visibility='default' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='49' column='1' elf-symbol-id='uu_exit_ok_value'/>
- <var-decl name='uu_exit_fatal_value' type-id='type-id-20' mangled-name='uu_exit_fatal_value' visibility='default' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='50' column='1' elf-symbol-id='uu_exit_fatal_value'/>
- <var-decl name='uu_exit_usage_value' type-id='type-id-20' mangled-name='uu_exit_usage_value' visibility='default' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='51' column='1' elf-symbol-id='uu_exit_usage_value'/>
- <pointer-type-def type-id='type-id-20' size-in-bits='64' id='type-id-96'/>
- <function-decl name='uu_exit_ok' mangled-name='uu_exit_ok' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='54' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_ok'>
- <return type-id='type-id-96'/>
- </function-decl>
- <function-decl name='uu_exit_fatal' mangled-name='uu_exit_fatal' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='60' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_fatal'>
- <return type-id='type-id-96'/>
+ <abi-instr version='1.0' address-size='64' path='uu_pname.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
+ <var-decl name='uu_exit_ok_value' type-id='type-id-22' mangled-name='uu_exit_ok_value' visibility='default' elf-symbol-id='uu_exit_ok_value'/>
+ <var-decl name='uu_exit_fatal_value' type-id='type-id-22' mangled-name='uu_exit_fatal_value' visibility='default' elf-symbol-id='uu_exit_fatal_value'/>
+ <var-decl name='uu_exit_usage_value' type-id='type-id-22' mangled-name='uu_exit_usage_value' visibility='default' elf-symbol-id='uu_exit_usage_value'/>
+ <function-decl name='uu_getpname' mangled-name='uu_getpname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_getpname'>
+ <return type-id='type-id-4'/>
</function-decl>
- <function-decl name='uu_exit_usage' mangled-name='uu_exit_usage' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='66' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_usage'>
- <return type-id='type-id-96'/>
+ <function-decl name='uu_setpname' mangled-name='uu_setpname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_setpname'>
+ <parameter type-id='type-id-2' name='arg0'/>
+ <return type-id='type-id-4'/>
</function-decl>
- <function-decl name='uu_alt_exit' mangled-name='uu_alt_exit' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='72' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_alt_exit'>
- <parameter type-id='type-id-20' name='profile' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='72' column='1'/>
- <return type-id='type-id-1'/>
+ <function-decl name='uu_xdie' mangled-name='uu_xdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_xdie'>
+ <parameter type-id='type-id-22' name='status'/>
+ <parameter type-id='type-id-4' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='type-id-5'/>
</function-decl>
- <class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-97'>
+ <class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-82'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='gp_offset' type-id='type-id-5' visibility='default' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='86' column='1'/>
+ <var-decl name='gp_offset' type-id='type-id-31' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='fp_offset' type-id='type-id-5' visibility='default' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='86' column='1'/>
+ <var-decl name='fp_offset' type-id='type-id-31' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='overflow_arg_area' type-id='type-id-2' visibility='default' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='86' column='1'/>
+ <var-decl name='overflow_arg_area' type-id='type-id-6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='reg_save_area' type-id='type-id-2' visibility='default' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='86' column='1'/>
+ <var-decl name='reg_save_area' type-id='type-id-6' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-97' size-in-bits='64' id='type-id-98'/>
- <function-decl name='uu_vwarn' mangled-name='uu_vwarn' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='101' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vwarn'>
- <parameter type-id='type-id-9' name='format' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='101' column='1'/>
- <parameter type-id='type-id-98' name='alist' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='101' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_warn' mangled-name='uu_warn' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='108' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_warn'>
- <parameter type-id='type-id-9' name='format' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='108' column='1'/>
+ <pointer-type-def type-id='type-id-82' size-in-bits='64' id='type-id-83'/>
+ <function-decl name='uu_vxdie' mangled-name='uu_vxdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vxdie'>
+ <parameter type-id='type-id-22' name='status'/>
+ <parameter type-id='type-id-4' name='format'/>
+ <parameter type-id='type-id-83' name='alist'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='uu_die' mangled-name='uu_die' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_die'>
+ <parameter type-id='type-id-4' name='format'/>
<parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_vdie' mangled-name='uu_vdie' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='135' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vdie'>
- <parameter type-id='type-id-9' name='format' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='135' column='1'/>
- <parameter type-id='type-id-98' name='alist' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='135' column='1'/>
- <return type-id='type-id-1'/>
+ <function-decl name='uu_vdie' mangled-name='uu_vdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vdie'>
+ <parameter type-id='type-id-4' name='format'/>
+ <parameter type-id='type-id-83' name='alist'/>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_die' mangled-name='uu_die' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='142' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_die'>
- <parameter type-id='type-id-9' name='format' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='142' column='1'/>
+ <function-decl name='uu_warn' mangled-name='uu_warn' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_warn'>
+ <parameter type-id='type-id-4' name='format'/>
<parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_vxdie' mangled-name='uu_vxdie' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='151' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vxdie'>
- <parameter type-id='type-id-20' name='status' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='151' column='1'/>
- <parameter type-id='type-id-9' name='format' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='151' column='1'/>
- <parameter type-id='type-id-98' name='alist' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='151' column='1'/>
- <return type-id='type-id-1'/>
+ <function-decl name='uu_vwarn' mangled-name='uu_vwarn' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vwarn'>
+ <parameter type-id='type-id-4' name='format'/>
+ <parameter type-id='type-id-83' name='alist'/>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='uu_xdie' mangled-name='uu_xdie' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='158' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_xdie'>
- <parameter type-id='type-id-20' name='status' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='158' column='1'/>
- <parameter type-id='type-id-9' name='format' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='158' column='1'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
+ <function-decl name='uu_alt_exit' mangled-name='uu_alt_exit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_alt_exit'>
+ <parameter type-id='type-id-22' name='profile'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-22' size-in-bits='64' id='type-id-84'/>
+ <function-decl name='uu_exit_usage' mangled-name='uu_exit_usage' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_usage'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='uu_setpname' mangled-name='uu_setpname' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='167' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_setpname'>
- <parameter type-id='type-id-7' name='arg0' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='167' column='1'/>
- <return type-id='type-id-9'/>
+ <function-decl name='uu_exit_fatal' mangled-name='uu_exit_fatal' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_fatal'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='getexecname' filepath='../../lib/libspl/include/stdlib.h' line='32' column='1' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-9'/>
+ <function-decl name='uu_exit_ok' mangled-name='uu_exit_ok' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_ok'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='uu_getpname' mangled-name='uu_getpname' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_pname.c' line='204' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_getpname'>
- <return type-id='type-id-9'/>
+ <function-decl name='strrchr' mangled-name='strrchr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='exit' mangled-name='exit' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='__fprintf_chk' mangled-name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='strerror' mangled-name='strerror' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='uu_string.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil' language='LANG_C99'>
- <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-99'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' filepath='../../lib/libspl/include/sys/stdtypes.h' line='26' column='1' id='type-id-100'>
- <underlying-type type-id='type-id-99'/>
+ <abi-instr version='1.0' address-size='64' path='uu_string.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
+ <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-85'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-86'>
+ <underlying-type type-id='type-id-85'/>
<enumerator name='B_FALSE' value='0'/>
<enumerator name='B_TRUE' value='1'/>
</enum-decl>
- <typedef-decl name='boolean_t' type-id='type-id-100' filepath='../../lib/libspl/include/sys/stdtypes.h' line='29' column='1' id='type-id-101'/>
- <function-decl name='uu_streq' mangled-name='uu_streq' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_string.c' line='37' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_streq'>
- <parameter type-id='type-id-9' name='a' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_string.c' line='37' column='1'/>
- <parameter type-id='type-id-9' name='b' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_string.c' line='37' column='1'/>
- <return type-id='type-id-101'/>
- </function-decl>
- <function-decl name='uu_strcaseeq' mangled-name='uu_strcaseeq' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_string.c' line='44' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strcaseeq'>
- <parameter type-id='type-id-9' name='a' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_string.c' line='37' column='1'/>
- <parameter type-id='type-id-9' name='b' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_string.c' line='37' column='1'/>
- <return type-id='type-id-101'/>
- </function-decl>
- <function-decl name='uu_strbw' mangled-name='uu_strbw' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_string.c' line='51' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strbw'>
- <parameter type-id='type-id-9' name='a' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_string.c' line='37' column='1'/>
- <parameter type-id='type-id-9' name='b' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libuutil/uu_string.c' line='37' column='1'/>
- <return type-id='type-id-101'/>
+ <typedef-decl name='boolean_t' type-id='type-id-86' id='type-id-87'/>
+ <function-decl name='uu_strbw' mangled-name='uu_strbw' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strbw'>
+ <parameter type-id='type-id-4' name='a'/>
+ <parameter type-id='type-id-4' name='b'/>
+ <return type-id='type-id-87'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/avl/avl.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libavl' language='LANG_C99'>
- <typedef-decl name='avl_tree_t' type-id='type-id-36' filepath='../../include/sys/avl.h' line='119' column='1' id='type-id-102'/>
- <pointer-type-def type-id='type-id-102' size-in-bits='64' id='type-id-103'/>
- <function-decl name='avl_walk' mangled-name='avl_walk' filepath='../../module/avl/avl.c' line='140' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_walk'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='140' column='1'/>
- <parameter type-id='type-id-2' name='oldnode' filepath='../../module/avl/avl.c' line='140' column='1'/>
- <parameter type-id='type-id-20' name='left' filepath='../../module/avl/avl.c' line='140' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='uu_strcaseeq' mangled-name='uu_strcaseeq' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strcaseeq'>
+ <parameter type-id='type-id-4' name='a'/>
+ <parameter type-id='type-id-4' name='b'/>
+ <return type-id='type-id-87'/>
</function-decl>
- <function-decl name='avl_first' mangled-name='avl_first' filepath='../../module/avl/avl.c' line='187' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_first'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='187' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='uu_streq' mangled-name='uu_streq' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_streq'>
+ <parameter type-id='type-id-4' name='a'/>
+ <parameter type-id='type-id-4' name='b'/>
+ <return type-id='type-id-87'/>
</function-decl>
- <function-decl name='avl_last' mangled-name='avl_last' filepath='../../module/avl/avl.c' line='206' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_last'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='187' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <typedef-decl name='avl_index_t' type-id='type-id-35' filepath='../../include/sys/avl.h' line='130' column='1' id='type-id-104'/>
- <function-decl name='avl_nearest' mangled-name='avl_nearest' filepath='../../module/avl/avl.c' line='230' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_nearest'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='230' column='1'/>
- <parameter type-id='type-id-104' name='where' filepath='../../module/avl/avl.c' line='230' column='1'/>
- <parameter type-id='type-id-20' name='direction' filepath='../../module/avl/avl.c' line='230' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='strncmp' mangled-name='strncmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <pointer-type-def type-id='type-id-104' size-in-bits='64' id='type-id-105'/>
- <function-decl name='avl_find' mangled-name='avl_find' filepath='../../module/avl/avl.c' line='259' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_find'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='259' column='1'/>
- <parameter type-id='type-id-2' name='value' filepath='../../module/avl/avl.c' line='259' column='1'/>
- <parameter type-id='type-id-105' name='where' filepath='../../module/avl/avl.c' line='259' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='strcasecmp' mangled-name='strcasecmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='avl_insert' mangled-name='avl_insert' filepath='../../module/avl/avl.c' line='486' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='486' column='1'/>
- <parameter type-id='type-id-2' name='new_data' filepath='../../module/avl/avl.c' line='486' column='1'/>
- <parameter type-id='type-id-104' name='where' filepath='../../module/avl/avl.c' line='486' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_insert_here' mangled-name='avl_insert_here' filepath='../../module/avl/avl.c' line='575' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert_here'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='576' column='1'/>
- <parameter type-id='type-id-2' name='new_data' filepath='../../module/avl/avl.c' line='577' column='1'/>
- <parameter type-id='type-id-2' name='here' filepath='../../module/avl/avl.c' line='578' column='1'/>
- <parameter type-id='type-id-20' name='direction' filepath='../../module/avl/avl.c' line='579' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_add' mangled-name='avl_add' filepath='../../module/avl/avl.c' line='636' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_add'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='636' column='1'/>
- <parameter type-id='type-id-2' name='new_node' filepath='../../module/avl/avl.c' line='636' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='libspl_assertf' filepath='../../lib/libspl/include/assert.h' line='40' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-20'/>
- <parameter type-id='type-id-9'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_remove' mangled-name='avl_remove' filepath='../../module/avl/avl.c' line='669' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_remove'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='669' column='1'/>
- <parameter type-id='type-id-2' name='data' filepath='../../module/avl/avl.c' line='669' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_update_lt' mangled-name='avl_update_lt' filepath='../../module/avl/avl.c' line='817' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_lt'>
- <parameter type-id='type-id-103' name='t' filepath='../../module/avl/avl.c' line='817' column='1'/>
- <parameter type-id='type-id-2' name='obj' filepath='../../module/avl/avl.c' line='817' column='1'/>
- <return type-id='type-id-101'/>
- </function-decl>
- <function-decl name='avl_update_gt' mangled-name='avl_update_gt' filepath='../../module/avl/avl.c' line='834' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_gt'>
- <parameter type-id='type-id-103' name='t' filepath='../../module/avl/avl.c' line='817' column='1'/>
- <parameter type-id='type-id-2' name='obj' filepath='../../module/avl/avl.c' line='817' column='1'/>
- <return type-id='type-id-101'/>
- </function-decl>
- <function-decl name='avl_update' mangled-name='avl_update' filepath='../../module/avl/avl.c' line='851' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update'>
- <parameter type-id='type-id-103' name='t' filepath='../../module/avl/avl.c' line='851' column='1'/>
- <parameter type-id='type-id-2' name='obj' filepath='../../module/avl/avl.c' line='851' column='1'/>
- <return type-id='type-id-101'/>
- </function-decl>
- <function-decl name='avl_swap' mangled-name='avl_swap' filepath='../../module/avl/avl.c' line='871' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_swap'>
- <parameter type-id='type-id-103' name='tree1' filepath='../../module/avl/avl.c' line='871' column='1'/>
- <parameter type-id='type-id-103' name='tree2' filepath='../../module/avl/avl.c' line='871' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_create' mangled-name='avl_create' filepath='../../module/avl/avl.c' line='892' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_create'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='892' column='1'/>
- <parameter type-id='type-id-39' name='compar' filepath='../../module/avl/avl.c' line='892' column='1'/>
- <parameter type-id='type-id-4' name='size' filepath='../../module/avl/avl.c' line='893' column='1'/>
- <parameter type-id='type-id-4' name='offset' filepath='../../module/avl/avl.c' line='893' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_destroy' mangled-name='avl_destroy' filepath='../../module/avl/avl.c' line='915' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='915' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_numnodes' mangled-name='avl_numnodes' filepath='../../module/avl/avl.c' line='927' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_numnodes'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='927' column='1'/>
- <return type-id='type-id-40'/>
- </function-decl>
- <function-decl name='avl_is_empty' mangled-name='avl_is_empty' filepath='../../module/avl/avl.c' line='934' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_is_empty'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='934' column='1'/>
- <return type-id='type-id-101'/>
- </function-decl>
- <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' filepath='../../module/avl/avl.c' line='962' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy_nodes'>
- <parameter type-id='type-id-103' name='tree' filepath='../../module/avl/avl.c' line='962' column='1'/>
- <parameter type-id='type-id-67' name='cookie' filepath='../../module/avl/avl.c' line='962' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='strcmp' mangled-name='strcmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='atomic.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <qualified-type-def type-id='type-id-14' volatile='yes' id='type-id-106'/>
- <pointer-type-def type-id='type-id-106' size-in-bits='64' id='type-id-107'/>
- <function-decl name='atomic_inc_8' mangled-name='atomic_inc_8' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='39' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='39' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <typedef-decl name='uchar_t' type-id='type-id-23' filepath='../../lib/libspl/include/sys/stdtypes.h' line='31' column='1' id='type-id-108'/>
- <qualified-type-def type-id='type-id-108' volatile='yes' id='type-id-109'/>
- <pointer-type-def type-id='type-id-109' size-in-bits='64' id='type-id-110'/>
- <function-decl name='atomic_inc_uchar' mangled-name='atomic_inc_uchar' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='40' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_uchar'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='40' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <type-decl name='unsigned short int' size-in-bits='16' id='type-id-111'/>
- <typedef-decl name='__uint16_t' type-id='type-id-111' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='39' column='1' id='type-id-112'/>
- <typedef-decl name='uint16_t' type-id='type-id-112' filepath='/usr/include/x86_64-linux-gnu/bits/stdint-uintn.h' line='25' column='1' id='type-id-113'/>
- <qualified-type-def type-id='type-id-113' volatile='yes' id='type-id-114'/>
- <pointer-type-def type-id='type-id-114' size-in-bits='64' id='type-id-115'/>
- <function-decl name='atomic_inc_16' mangled-name='atomic_inc_16' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='41' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='41' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <typedef-decl name='ushort_t' type-id='type-id-111' filepath='../../lib/libspl/include/sys/stdtypes.h' line='32' column='1' id='type-id-116'/>
- <qualified-type-def type-id='type-id-116' volatile='yes' id='type-id-117'/>
- <pointer-type-def type-id='type-id-117' size-in-bits='64' id='type-id-118'/>
- <function-decl name='atomic_inc_ushort' mangled-name='atomic_inc_ushort' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='42' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ushort'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='42' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-52' volatile='yes' id='type-id-119'/>
- <pointer-type-def type-id='type-id-119' size-in-bits='64' id='type-id-120'/>
- <function-decl name='atomic_inc_32' mangled-name='atomic_inc_32' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='43' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='43' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-71' volatile='yes' id='type-id-121'/>
- <pointer-type-def type-id='type-id-121' size-in-bits='64' id='type-id-122'/>
- <function-decl name='atomic_inc_uint' mangled-name='atomic_inc_uint' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='44' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_uint'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='44' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-40' volatile='yes' id='type-id-123'/>
- <pointer-type-def type-id='type-id-123' size-in-bits='64' id='type-id-124'/>
- <function-decl name='atomic_inc_ulong' mangled-name='atomic_inc_ulong' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='45' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='45' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <typedef-decl name='__uint64_t' type-id='type-id-3' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='44' column='1' id='type-id-125'/>
- <typedef-decl name='uint64_t' type-id='type-id-125' filepath='/usr/include/x86_64-linux-gnu/bits/stdint-uintn.h' line='27' column='1' id='type-id-126'/>
- <qualified-type-def type-id='type-id-126' volatile='yes' id='type-id-127'/>
- <pointer-type-def type-id='type-id-127' size-in-bits='64' id='type-id-128'/>
- <function-decl name='atomic_inc_64' mangled-name='atomic_inc_64' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='46' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_64'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='46' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_dec_8' mangled-name='atomic_dec_8' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='55' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='39' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_dec_uchar' mangled-name='atomic_dec_uchar' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='56' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_uchar'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='40' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_dec_16' mangled-name='atomic_dec_16' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='57' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='41' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_dec_ushort' mangled-name='atomic_dec_ushort' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='58' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ushort'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='42' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_dec_32' mangled-name='atomic_dec_32' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='59' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='43' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_dec_uint' mangled-name='atomic_dec_uint' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='60' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_uint'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='44' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_dec_ulong' mangled-name='atomic_dec_ulong' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='61' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='45' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_dec_64' mangled-name='atomic_dec_64' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='62' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_64'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='46' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_add_8' mangled-name='atomic_add_8' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='71' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='71' column='1'/>
- <parameter type-id='type-id-48' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='71' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_add_char' mangled-name='atomic_add_char' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='72' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_char'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='72' column='1'/>
- <parameter type-id='type-id-49' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='72' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <typedef-decl name='__int16_t' type-id='type-id-29' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='38' column='1' id='type-id-129'/>
- <typedef-decl name='int16_t' type-id='type-id-129' filepath='/usr/include/x86_64-linux-gnu/bits/stdint-intn.h' line='25' column='1' id='type-id-130'/>
- <function-decl name='atomic_add_16' mangled-name='atomic_add_16' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='73' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='73' column='1'/>
- <parameter type-id='type-id-130' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='73' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_add_short' mangled-name='atomic_add_short' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='74' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_short'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='74' column='1'/>
- <parameter type-id='type-id-29' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='74' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <typedef-decl name='__int32_t' type-id='type-id-20' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='40' column='1' id='type-id-131'/>
- <typedef-decl name='int32_t' type-id='type-id-131' filepath='/usr/include/x86_64-linux-gnu/bits/stdint-intn.h' line='26' column='1' id='type-id-132'/>
- <function-decl name='atomic_add_32' mangled-name='atomic_add_32' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='75' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='75' column='1'/>
- <parameter type-id='type-id-132' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='75' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_add_int' mangled-name='atomic_add_int' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='76' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_int'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='76' column='1'/>
- <parameter type-id='type-id-20' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='76' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_add_long' mangled-name='atomic_add_long' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='77' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='77' column='1'/>
- <parameter type-id='type-id-28' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='77' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <typedef-decl name='__int64_t' type-id='type-id-28' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='43' column='1' id='type-id-133'/>
- <typedef-decl name='int64_t' type-id='type-id-133' filepath='/usr/include/x86_64-linux-gnu/bits/stdint-intn.h' line='27' column='1' id='type-id-134'/>
- <function-decl name='atomic_add_64' mangled-name='atomic_add_64' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='78' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_64'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='78' column='1'/>
- <parameter type-id='type-id-134' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='78' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-1' volatile='yes' id='type-id-135'/>
- <pointer-type-def type-id='type-id-135' size-in-bits='64' id='type-id-136'/>
- <typedef-decl name='__ssize_t' type-id='type-id-28' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='191' column='1' id='type-id-137'/>
- <typedef-decl name='ssize_t' type-id='type-id-137' filepath='/usr/include/x86_64-linux-gnu/sys/types.h' line='108' column='1' id='type-id-138'/>
- <function-decl name='atomic_add_ptr' mangled-name='atomic_add_ptr' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='81' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr'>
- <parameter type-id='type-id-136' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='81' column='1'/>
- <parameter type-id='type-id-138' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='81' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_sub_8' mangled-name='atomic_sub_8' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='93' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='71' column='1'/>
- <parameter type-id='type-id-48' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='71' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_sub_char' mangled-name='atomic_sub_char' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='94' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_char'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='72' column='1'/>
- <parameter type-id='type-id-49' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='72' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_sub_16' mangled-name='atomic_sub_16' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='95' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='73' column='1'/>
- <parameter type-id='type-id-130' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='73' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_sub_short' mangled-name='atomic_sub_short' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='96' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_short'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='74' column='1'/>
- <parameter type-id='type-id-29' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='74' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_sub_32' mangled-name='atomic_sub_32' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='97' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='75' column='1'/>
- <parameter type-id='type-id-132' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='75' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_sub_int' mangled-name='atomic_sub_int' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='98' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_int'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='76' column='1'/>
- <parameter type-id='type-id-20' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='76' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_sub_long' mangled-name='atomic_sub_long' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='99' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='77' column='1'/>
- <parameter type-id='type-id-28' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='77' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_sub_64' mangled-name='atomic_sub_64' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='100' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_64'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='78' column='1'/>
- <parameter type-id='type-id-134' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='78' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_sub_ptr' mangled-name='atomic_sub_ptr' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='103' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr'>
- <parameter type-id='type-id-136' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='81' column='1'/>
- <parameter type-id='type-id-138' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='81' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_or_8' mangled-name='atomic_or_8' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='115' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='115' column='1'/>
- <parameter type-id='type-id-14' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='115' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_or_uchar' mangled-name='atomic_or_uchar' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='116' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_uchar'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='116' column='1'/>
- <parameter type-id='type-id-108' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='116' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_or_16' mangled-name='atomic_or_16' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='117' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='117' column='1'/>
- <parameter type-id='type-id-113' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='117' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_or_ushort' mangled-name='atomic_or_ushort' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='118' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ushort'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='118' column='1'/>
- <parameter type-id='type-id-116' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='118' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_or_32' mangled-name='atomic_or_32' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='119' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='119' column='1'/>
- <parameter type-id='type-id-52' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='119' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_or_uint' mangled-name='atomic_or_uint' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='120' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_uint'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='120' column='1'/>
- <parameter type-id='type-id-71' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='120' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_or_ulong' mangled-name='atomic_or_ulong' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='121' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='121' column='1'/>
- <parameter type-id='type-id-40' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='121' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_or_64' mangled-name='atomic_or_64' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='122' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_64'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='122' column='1'/>
- <parameter type-id='type-id-126' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='122' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_and_8' mangled-name='atomic_and_8' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='131' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='115' column='1'/>
- <parameter type-id='type-id-14' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='115' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_and_uchar' mangled-name='atomic_and_uchar' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='132' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_uchar'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='116' column='1'/>
- <parameter type-id='type-id-108' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='116' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_and_16' mangled-name='atomic_and_16' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='133' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='117' column='1'/>
- <parameter type-id='type-id-113' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='117' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_and_ushort' mangled-name='atomic_and_ushort' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='134' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ushort'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='118' column='1'/>
- <parameter type-id='type-id-116' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='118' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_and_32' mangled-name='atomic_and_32' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='135' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='119' column='1'/>
- <parameter type-id='type-id-52' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='119' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_and_uint' mangled-name='atomic_and_uint' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='136' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_uint'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='120' column='1'/>
- <parameter type-id='type-id-71' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='120' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_and_ulong' mangled-name='atomic_and_ulong' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='137' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='121' column='1'/>
- <parameter type-id='type-id-40' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='121' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_and_64' mangled-name='atomic_and_64' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='138' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_64'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='122' column='1'/>
- <parameter type-id='type-id-126' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='122' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_inc_8_nv' mangled-name='atomic_inc_8_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='151' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8_nv'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='151' column='1'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='atomic_inc_uchar_nv' mangled-name='atomic_inc_uchar_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='152' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_uchar_nv'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='152' column='1'/>
- <return type-id='type-id-108'/>
- </function-decl>
- <function-decl name='atomic_inc_16_nv' mangled-name='atomic_inc_16_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='153' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16_nv'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='153' column='1'/>
- <return type-id='type-id-113'/>
- </function-decl>
- <function-decl name='atomic_inc_ushort_nv' mangled-name='atomic_inc_ushort_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='154' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ushort_nv'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='154' column='1'/>
- <return type-id='type-id-116'/>
- </function-decl>
- <function-decl name='atomic_inc_32_nv' mangled-name='atomic_inc_32_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='155' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32_nv'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='155' column='1'/>
- <return type-id='type-id-52'/>
- </function-decl>
- <function-decl name='atomic_inc_uint_nv' mangled-name='atomic_inc_uint_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='156' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_uint_nv'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='156' column='1'/>
- <return type-id='type-id-71'/>
- </function-decl>
- <function-decl name='atomic_inc_ulong_nv' mangled-name='atomic_inc_ulong_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='157' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong_nv'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='157' column='1'/>
- <return type-id='type-id-40'/>
- </function-decl>
- <function-decl name='atomic_inc_64_nv' mangled-name='atomic_inc_64_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='158' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_64_nv'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='158' column='1'/>
- <return type-id='type-id-126'/>
- </function-decl>
- <function-decl name='atomic_dec_8_nv' mangled-name='atomic_dec_8_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='167' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8_nv'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='151' column='1'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='atomic_dec_uchar_nv' mangled-name='atomic_dec_uchar_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='168' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_uchar_nv'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='152' column='1'/>
- <return type-id='type-id-108'/>
- </function-decl>
- <function-decl name='atomic_dec_16_nv' mangled-name='atomic_dec_16_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='169' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16_nv'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='153' column='1'/>
- <return type-id='type-id-113'/>
- </function-decl>
- <function-decl name='atomic_dec_ushort_nv' mangled-name='atomic_dec_ushort_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='170' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ushort_nv'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='154' column='1'/>
- <return type-id='type-id-116'/>
- </function-decl>
- <function-decl name='atomic_dec_32_nv' mangled-name='atomic_dec_32_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='171' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32_nv'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='155' column='1'/>
- <return type-id='type-id-52'/>
- </function-decl>
- <function-decl name='atomic_dec_uint_nv' mangled-name='atomic_dec_uint_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='172' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_uint_nv'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='156' column='1'/>
- <return type-id='type-id-71'/>
- </function-decl>
- <function-decl name='atomic_dec_ulong_nv' mangled-name='atomic_dec_ulong_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='173' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong_nv'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='157' column='1'/>
- <return type-id='type-id-40'/>
- </function-decl>
- <function-decl name='atomic_dec_64_nv' mangled-name='atomic_dec_64_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='174' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_64_nv'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='158' column='1'/>
- <return type-id='type-id-126'/>
- </function-decl>
- <function-decl name='atomic_add_8_nv' mangled-name='atomic_add_8_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='183' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8_nv'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='183' column='1'/>
- <parameter type-id='type-id-48' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='183' column='1'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='atomic_add_char_nv' mangled-name='atomic_add_char_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='184' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_char_nv'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='184' column='1'/>
- <parameter type-id='type-id-49' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='184' column='1'/>
- <return type-id='type-id-108'/>
- </function-decl>
- <function-decl name='atomic_add_16_nv' mangled-name='atomic_add_16_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='185' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16_nv'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='185' column='1'/>
- <parameter type-id='type-id-130' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='185' column='1'/>
- <return type-id='type-id-113'/>
- </function-decl>
- <function-decl name='atomic_add_short_nv' mangled-name='atomic_add_short_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='186' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_short_nv'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='186' column='1'/>
- <parameter type-id='type-id-29' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='186' column='1'/>
- <return type-id='type-id-116'/>
- </function-decl>
- <function-decl name='atomic_add_32_nv' mangled-name='atomic_add_32_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='187' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32_nv'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='187' column='1'/>
- <parameter type-id='type-id-132' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='187' column='1'/>
- <return type-id='type-id-52'/>
- </function-decl>
- <function-decl name='atomic_add_int_nv' mangled-name='atomic_add_int_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='188' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_int_nv'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='188' column='1'/>
- <parameter type-id='type-id-20' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='188' column='1'/>
- <return type-id='type-id-71'/>
- </function-decl>
- <function-decl name='atomic_add_long_nv' mangled-name='atomic_add_long_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='189' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long_nv'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='189' column='1'/>
- <parameter type-id='type-id-28' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='189' column='1'/>
- <return type-id='type-id-40'/>
- </function-decl>
- <function-decl name='atomic_add_64_nv' mangled-name='atomic_add_64_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='190' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_64_nv'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='190' column='1'/>
- <parameter type-id='type-id-134' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='190' column='1'/>
- <return type-id='type-id-126'/>
- </function-decl>
- <function-decl name='atomic_add_ptr_nv' mangled-name='atomic_add_ptr_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='193' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr_nv'>
- <parameter type-id='type-id-136' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='193' column='1'/>
- <parameter type-id='type-id-138' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='193' column='1'/>
- <return type-id='type-id-2'/>
+ <abi-instr version='1.0' address-size='64' path='../../module/avl/avl.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libavl' language='LANG_C99'>
+ <typedef-decl name='avl_tree_t' type-id='type-id-13' id='type-id-88'/>
+ <pointer-type-def type-id='type-id-88' size-in-bits='64' id='type-id-89'/>
+ <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy_nodes'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <parameter type-id='type-id-52' name='cookie'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='avl_is_empty' mangled-name='avl_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_is_empty'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <return type-id='type-id-87'/>
+ </function-decl>
+ <function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_numnodes'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <return type-id='type-id-39'/>
+ </function-decl>
+ <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_create'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <parameter type-id='type-id-38' name='compar'/>
+ <parameter type-id='type-id-8' name='size'/>
+ <parameter type-id='type-id-8' name='offset'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='avl_swap' mangled-name='avl_swap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_swap'>
+ <parameter type-id='type-id-89' name='tree1'/>
+ <parameter type-id='type-id-89' name='tree2'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='avl_update' mangled-name='avl_update' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update'>
+ <parameter type-id='type-id-89' name='t'/>
+ <parameter type-id='type-id-6' name='obj'/>
+ <return type-id='type-id-87'/>
+ </function-decl>
+ <function-decl name='avl_update_gt' mangled-name='avl_update_gt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_gt'>
+ <parameter type-id='type-id-89' name='t'/>
+ <parameter type-id='type-id-6' name='obj'/>
+ <return type-id='type-id-87'/>
+ </function-decl>
+ <function-decl name='avl_update_lt' mangled-name='avl_update_lt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_lt'>
+ <parameter type-id='type-id-89' name='t'/>
+ <parameter type-id='type-id-6' name='obj'/>
+ <return type-id='type-id-87'/>
+ </function-decl>
+ <function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_remove'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <parameter type-id='type-id-6' name='data'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_add'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <parameter type-id='type-id-6' name='new_node'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='avl_insert_here' mangled-name='avl_insert_here' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert_here'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <parameter type-id='type-id-6' name='new_data'/>
+ <parameter type-id='type-id-6' name='here'/>
+ <parameter type-id='type-id-22' name='direction'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <typedef-decl name='avl_index_t' type-id='type-id-10' id='type-id-90'/>
+ <function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <parameter type-id='type-id-6' name='new_data'/>
+ <parameter type-id='type-id-90' name='where'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-90' size-in-bits='64' id='type-id-91'/>
+ <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_find'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <parameter type-id='type-id-6' name='value'/>
+ <parameter type-id='type-id-91' name='where'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_nearest'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <parameter type-id='type-id-90' name='where'/>
+ <parameter type-id='type-id-22' name='direction'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_last'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_first'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_walk'>
+ <parameter type-id='type-id-89' name='tree'/>
+ <parameter type-id='type-id-6' name='oldnode'/>
+ <parameter type-id='type-id-22' name='left'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='atomic_sub_8_nv' mangled-name='atomic_sub_8_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='205' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8_nv'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='183' column='1'/>
- <parameter type-id='type-id-48' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='183' column='1'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='atomic_sub_char_nv' mangled-name='atomic_sub_char_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='206' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_char_nv'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='184' column='1'/>
- <parameter type-id='type-id-49' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='184' column='1'/>
- <return type-id='type-id-108'/>
- </function-decl>
- <function-decl name='atomic_sub_16_nv' mangled-name='atomic_sub_16_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='207' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16_nv'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='185' column='1'/>
- <parameter type-id='type-id-130' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='185' column='1'/>
- <return type-id='type-id-113'/>
- </function-decl>
- <function-decl name='atomic_sub_short_nv' mangled-name='atomic_sub_short_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='208' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_short_nv'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='186' column='1'/>
- <parameter type-id='type-id-29' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='186' column='1'/>
- <return type-id='type-id-116'/>
- </function-decl>
- <function-decl name='atomic_sub_32_nv' mangled-name='atomic_sub_32_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='209' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32_nv'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='187' column='1'/>
- <parameter type-id='type-id-132' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='187' column='1'/>
- <return type-id='type-id-52'/>
- </function-decl>
- <function-decl name='atomic_sub_int_nv' mangled-name='atomic_sub_int_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='210' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_int_nv'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='188' column='1'/>
- <parameter type-id='type-id-20' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='188' column='1'/>
- <return type-id='type-id-71'/>
- </function-decl>
- <function-decl name='atomic_sub_long_nv' mangled-name='atomic_sub_long_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='211' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long_nv'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='189' column='1'/>
- <parameter type-id='type-id-28' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='189' column='1'/>
- <return type-id='type-id-40'/>
- </function-decl>
- <function-decl name='atomic_sub_64_nv' mangled-name='atomic_sub_64_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='212' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_64_nv'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='190' column='1'/>
- <parameter type-id='type-id-134' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='190' column='1'/>
- <return type-id='type-id-126'/>
- </function-decl>
- <function-decl name='atomic_sub_ptr_nv' mangled-name='atomic_sub_ptr_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='215' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr_nv'>
- <parameter type-id='type-id-136' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='193' column='1'/>
- <parameter type-id='type-id-138' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='193' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='atomic_or_8_nv' mangled-name='atomic_or_8_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='227' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8_nv'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='227' column='1'/>
- <parameter type-id='type-id-14' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='227' column='1'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='atomic_or_uchar_nv' mangled-name='atomic_or_uchar_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='228' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_uchar_nv'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='228' column='1'/>
- <parameter type-id='type-id-108' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='228' column='1'/>
- <return type-id='type-id-108'/>
- </function-decl>
- <function-decl name='atomic_or_16_nv' mangled-name='atomic_or_16_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='229' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16_nv'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='229' column='1'/>
- <parameter type-id='type-id-113' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='229' column='1'/>
- <return type-id='type-id-113'/>
- </function-decl>
- <function-decl name='atomic_or_ushort_nv' mangled-name='atomic_or_ushort_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='230' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ushort_nv'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='230' column='1'/>
- <parameter type-id='type-id-116' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='230' column='1'/>
- <return type-id='type-id-116'/>
- </function-decl>
- <function-decl name='atomic_or_32_nv' mangled-name='atomic_or_32_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='231' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32_nv'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='231' column='1'/>
- <parameter type-id='type-id-52' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='231' column='1'/>
- <return type-id='type-id-52'/>
- </function-decl>
- <function-decl name='atomic_or_uint_nv' mangled-name='atomic_or_uint_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='232' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_uint_nv'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='232' column='1'/>
- <parameter type-id='type-id-71' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='232' column='1'/>
- <return type-id='type-id-71'/>
- </function-decl>
- <function-decl name='atomic_or_ulong_nv' mangled-name='atomic_or_ulong_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='233' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong_nv'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='233' column='1'/>
- <parameter type-id='type-id-40' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='233' column='1'/>
- <return type-id='type-id-40'/>
- </function-decl>
- <function-decl name='atomic_or_64_nv' mangled-name='atomic_or_64_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='234' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_64_nv'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='234' column='1'/>
- <parameter type-id='type-id-126' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='234' column='1'/>
- <return type-id='type-id-126'/>
- </function-decl>
- <function-decl name='atomic_and_8_nv' mangled-name='atomic_and_8_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='243' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8_nv'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='227' column='1'/>
- <parameter type-id='type-id-14' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='227' column='1'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='atomic_and_uchar_nv' mangled-name='atomic_and_uchar_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='244' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_uchar_nv'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='228' column='1'/>
- <parameter type-id='type-id-108' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='228' column='1'/>
- <return type-id='type-id-108'/>
- </function-decl>
- <function-decl name='atomic_and_16_nv' mangled-name='atomic_and_16_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='245' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16_nv'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='229' column='1'/>
- <parameter type-id='type-id-113' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='229' column='1'/>
- <return type-id='type-id-113'/>
- </function-decl>
- <function-decl name='atomic_and_ushort_nv' mangled-name='atomic_and_ushort_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='246' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ushort_nv'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='230' column='1'/>
- <parameter type-id='type-id-116' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='230' column='1'/>
- <return type-id='type-id-116'/>
- </function-decl>
- <function-decl name='atomic_and_32_nv' mangled-name='atomic_and_32_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='247' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32_nv'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='231' column='1'/>
- <parameter type-id='type-id-52' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='231' column='1'/>
- <return type-id='type-id-52'/>
- </function-decl>
- <function-decl name='atomic_and_uint_nv' mangled-name='atomic_and_uint_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='248' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_uint_nv'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='232' column='1'/>
- <parameter type-id='type-id-71' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='232' column='1'/>
- <return type-id='type-id-71'/>
- </function-decl>
- <function-decl name='atomic_and_ulong_nv' mangled-name='atomic_and_ulong_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='249' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong_nv'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='233' column='1'/>
- <parameter type-id='type-id-40' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='233' column='1'/>
- <return type-id='type-id-40'/>
- </function-decl>
- <function-decl name='atomic_and_64_nv' mangled-name='atomic_and_64_nv' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='250' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_64_nv'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='234' column='1'/>
- <parameter type-id='type-id-126' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='234' column='1'/>
- <return type-id='type-id-126'/>
- </function-decl>
- <function-decl name='atomic_cas_8' mangled-name='atomic_cas_8' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='271' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_8'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='271' column='1'/>
- <parameter type-id='type-id-14' name='exp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='271' column='1'/>
- <parameter type-id='type-id-14' name='des' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='271' column='1'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='atomic_cas_uchar' mangled-name='atomic_cas_uchar' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='272' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_uchar'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='272' column='1'/>
- <parameter type-id='type-id-108' name='exp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='272' column='1'/>
- <parameter type-id='type-id-108' name='des' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='272' column='1'/>
- <return type-id='type-id-108'/>
- </function-decl>
- <function-decl name='atomic_cas_16' mangled-name='atomic_cas_16' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='273' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_16'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='273' column='1'/>
- <parameter type-id='type-id-113' name='exp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='273' column='1'/>
- <parameter type-id='type-id-113' name='des' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='273' column='1'/>
- <return type-id='type-id-113'/>
- </function-decl>
- <function-decl name='atomic_cas_ushort' mangled-name='atomic_cas_ushort' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='274' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ushort'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='274' column='1'/>
- <parameter type-id='type-id-116' name='exp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='274' column='1'/>
- <parameter type-id='type-id-116' name='des' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='274' column='1'/>
- <return type-id='type-id-116'/>
- </function-decl>
- <function-decl name='atomic_cas_32' mangled-name='atomic_cas_32' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='275' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_32'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='275' column='1'/>
- <parameter type-id='type-id-52' name='exp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='275' column='1'/>
- <parameter type-id='type-id-52' name='des' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='275' column='1'/>
- <return type-id='type-id-52'/>
- </function-decl>
- <function-decl name='atomic_cas_uint' mangled-name='atomic_cas_uint' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='276' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_uint'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='276' column='1'/>
- <parameter type-id='type-id-71' name='exp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='276' column='1'/>
- <parameter type-id='type-id-71' name='des' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='276' column='1'/>
- <return type-id='type-id-71'/>
- </function-decl>
- <function-decl name='atomic_cas_ulong' mangled-name='atomic_cas_ulong' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='277' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ulong'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='277' column='1'/>
- <parameter type-id='type-id-40' name='exp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='277' column='1'/>
- <parameter type-id='type-id-40' name='des' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='277' column='1'/>
- <return type-id='type-id-40'/>
- </function-decl>
- <function-decl name='atomic_cas_64' mangled-name='atomic_cas_64' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='278' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_64'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='278' column='1'/>
- <parameter type-id='type-id-126' name='exp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='278' column='1'/>
- <parameter type-id='type-id-126' name='des' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='278' column='1'/>
- <return type-id='type-id-126'/>
- </function-decl>
- <function-decl name='atomic_cas_ptr' mangled-name='atomic_cas_ptr' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='281' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ptr'>
- <parameter type-id='type-id-136' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='281' column='1'/>
- <parameter type-id='type-id-2' name='exp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='281' column='1'/>
- <parameter type-id='type-id-2' name='des' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='281' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='atomic_swap_8' mangled-name='atomic_swap_8' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='300' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_8'>
- <parameter type-id='type-id-107' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='227' column='1'/>
- <parameter type-id='type-id-14' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='227' column='1'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='atomic_swap_uchar' mangled-name='atomic_swap_uchar' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='301' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_uchar'>
- <parameter type-id='type-id-110' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='228' column='1'/>
- <parameter type-id='type-id-108' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='228' column='1'/>
- <return type-id='type-id-108'/>
- </function-decl>
- <function-decl name='atomic_swap_16' mangled-name='atomic_swap_16' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='302' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_16'>
- <parameter type-id='type-id-115' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='229' column='1'/>
- <parameter type-id='type-id-113' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='229' column='1'/>
- <return type-id='type-id-113'/>
- </function-decl>
- <function-decl name='atomic_swap_ushort' mangled-name='atomic_swap_ushort' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='303' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ushort'>
- <parameter type-id='type-id-118' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='230' column='1'/>
- <parameter type-id='type-id-116' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='230' column='1'/>
- <return type-id='type-id-116'/>
- </function-decl>
- <function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='304' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_32'>
- <parameter type-id='type-id-120' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='231' column='1'/>
- <parameter type-id='type-id-52' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='231' column='1'/>
- <return type-id='type-id-52'/>
- </function-decl>
- <function-decl name='atomic_swap_uint' mangled-name='atomic_swap_uint' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='305' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_uint'>
- <parameter type-id='type-id-122' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='232' column='1'/>
- <parameter type-id='type-id-71' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='232' column='1'/>
- <return type-id='type-id-71'/>
- </function-decl>
- <function-decl name='atomic_swap_ulong' mangled-name='atomic_swap_ulong' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='306' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ulong'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='233' column='1'/>
- <parameter type-id='type-id-40' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='233' column='1'/>
- <return type-id='type-id-40'/>
- </function-decl>
- <function-decl name='atomic_swap_64' mangled-name='atomic_swap_64' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='307' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_64'>
- <parameter type-id='type-id-128' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='234' column='1'/>
- <parameter type-id='type-id-126' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='234' column='1'/>
- <return type-id='type-id-126'/>
- </function-decl>
- <function-decl name='atomic_swap_ptr' mangled-name='atomic_swap_ptr' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='311' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ptr'>
- <parameter type-id='type-id-136' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='311' column='1'/>
- <parameter type-id='type-id-2' name='bits' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='311' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='atomic_set_long_excl' mangled-name='atomic_set_long_excl' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='318' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_set_long_excl'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='318' column='1'/>
- <parameter type-id='type-id-71' name='value' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='318' column='1'/>
- <return type-id='type-id-20'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='atomic.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='membar_consumer' mangled-name='membar_consumer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_consumer'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='atomic_clear_long_excl' mangled-name='atomic_clear_long_excl' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='326' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_clear_long_excl'>
- <parameter type-id='type-id-124' name='target' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='318' column='1'/>
- <parameter type-id='type-id-71' name='value' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='318' column='1'/>
- <return type-id='type-id-20'/>
+ <function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_producer'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='membar_enter' mangled-name='membar_enter' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='334' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_enter'>
- <return type-id='type-id-1'/>
+ <function-decl name='membar_enter' mangled-name='membar_enter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_enter'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='membar_exit' mangled-name='membar_exit' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='340' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_exit'>
- <return type-id='type-id-1'/>
+ <qualified-type-def type-id='type-id-39' volatile='yes' id='type-id-92'/>
+ <pointer-type-def type-id='type-id-92' size-in-bits='64' id='type-id-93'/>
+ <function-decl name='atomic_clear_long_excl' mangled-name='atomic_clear_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_clear_long_excl'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-62' name='value'/>
+ <return type-id='type-id-22'/>
</function-decl>
- <function-decl name='membar_producer' mangled-name='membar_producer' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='346' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_producer'>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_set_long_excl' mangled-name='atomic_set_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_set_long_excl'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-62' name='value'/>
+ <return type-id='type-id-22'/>
</function-decl>
- <function-decl name='membar_consumer' mangled-name='membar_consumer' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/atomic.c' line='352' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_consumer'>
- <return type-id='type-id-1'/>
+ <qualified-type-def type-id='type-id-5' volatile='yes' id='type-id-94'/>
+ <pointer-type-def type-id='type-id-94' size-in-bits='64' id='type-id-95'/>
+ <function-decl name='atomic_swap_ptr' mangled-name='atomic_swap_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ptr'>
+ <parameter type-id='type-id-95' name='target'/>
+ <parameter type-id='type-id-6' name='bits'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='atomic_swap_ulong' mangled-name='atomic_swap_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ulong'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-39' name='bits'/>
+ <return type-id='type-id-39'/>
+ </function-decl>
+ <qualified-type-def type-id='type-id-56' volatile='yes' id='type-id-96'/>
+ <pointer-type-def type-id='type-id-96' size-in-bits='64' id='type-id-97'/>
+ <function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_32'>
+ <parameter type-id='type-id-97' name='target'/>
+ <parameter type-id='type-id-56' name='bits'/>
+ <return type-id='type-id-56'/>
+ </function-decl>
+ <type-decl name='unsigned short int' size-in-bits='16' id='type-id-98'/>
+ <typedef-decl name='__uint16_t' type-id='type-id-98' id='type-id-99'/>
+ <typedef-decl name='uint16_t' type-id='type-id-99' id='type-id-100'/>
+ <qualified-type-def type-id='type-id-100' volatile='yes' id='type-id-101'/>
+ <pointer-type-def type-id='type-id-101' size-in-bits='64' id='type-id-102'/>
+ <function-decl name='atomic_swap_16' mangled-name='atomic_swap_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_16'>
+ <parameter type-id='type-id-102' name='target'/>
+ <parameter type-id='type-id-100' name='bits'/>
+ <return type-id='type-id-100'/>
+ </function-decl>
+ <qualified-type-def type-id='type-id-12' volatile='yes' id='type-id-103'/>
+ <pointer-type-def type-id='type-id-103' size-in-bits='64' id='type-id-104'/>
+ <function-decl name='atomic_swap_8' mangled-name='atomic_swap_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_8'>
+ <parameter type-id='type-id-104' name='target'/>
+ <parameter type-id='type-id-12' name='bits'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <function-decl name='atomic_cas_ptr' mangled-name='atomic_cas_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ptr'>
+ <parameter type-id='type-id-95' name='target'/>
+ <parameter type-id='type-id-6' name='exp'/>
+ <parameter type-id='type-id-6' name='des'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='atomic_and_ulong_nv' mangled-name='atomic_and_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong_nv'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-39' name='bits'/>
+ <return type-id='type-id-39'/>
+ </function-decl>
+ <function-decl name='atomic_and_32_nv' mangled-name='atomic_and_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32_nv'>
+ <parameter type-id='type-id-97' name='target'/>
+ <parameter type-id='type-id-56' name='bits'/>
+ <return type-id='type-id-56'/>
+ </function-decl>
+ <function-decl name='atomic_and_16_nv' mangled-name='atomic_and_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16_nv'>
+ <parameter type-id='type-id-102' name='target'/>
+ <parameter type-id='type-id-100' name='bits'/>
+ <return type-id='type-id-100'/>
+ </function-decl>
+ <function-decl name='atomic_and_8_nv' mangled-name='atomic_and_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8_nv'>
+ <parameter type-id='type-id-104' name='target'/>
+ <parameter type-id='type-id-12' name='bits'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <function-decl name='atomic_or_ulong_nv' mangled-name='atomic_or_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong_nv'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-39' name='bits'/>
+ <return type-id='type-id-39'/>
+ </function-decl>
+ <function-decl name='atomic_or_32_nv' mangled-name='atomic_or_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32_nv'>
+ <parameter type-id='type-id-97' name='target'/>
+ <parameter type-id='type-id-56' name='bits'/>
+ <return type-id='type-id-56'/>
+ </function-decl>
+ <function-decl name='atomic_or_16_nv' mangled-name='atomic_or_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16_nv'>
+ <parameter type-id='type-id-102' name='target'/>
+ <parameter type-id='type-id-100' name='bits'/>
+ <return type-id='type-id-100'/>
+ </function-decl>
+ <function-decl name='atomic_or_8_nv' mangled-name='atomic_or_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8_nv'>
+ <parameter type-id='type-id-104' name='target'/>
+ <parameter type-id='type-id-12' name='bits'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <typedef-decl name='__ssize_t' type-id='type-id-30' id='type-id-105'/>
+ <typedef-decl name='ssize_t' type-id='type-id-105' id='type-id-106'/>
+ <function-decl name='atomic_sub_ptr_nv' mangled-name='atomic_sub_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr_nv'>
+ <parameter type-id='type-id-95' name='target'/>
+ <parameter type-id='type-id-106' name='bits'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='atomic_sub_long_nv' mangled-name='atomic_sub_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long_nv'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-30' name='bits'/>
+ <return type-id='type-id-39'/>
+ </function-decl>
+ <typedef-decl name='__int32_t' type-id='type-id-22' id='type-id-107'/>
+ <typedef-decl name='int32_t' type-id='type-id-107' id='type-id-108'/>
+ <function-decl name='atomic_sub_32_nv' mangled-name='atomic_sub_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32_nv'>
+ <parameter type-id='type-id-97' name='target'/>
+ <parameter type-id='type-id-108' name='bits'/>
+ <return type-id='type-id-56'/>
+ </function-decl>
+ <typedef-decl name='__int16_t' type-id='type-id-32' id='type-id-109'/>
+ <typedef-decl name='int16_t' type-id='type-id-109' id='type-id-110'/>
+ <function-decl name='atomic_sub_16_nv' mangled-name='atomic_sub_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16_nv'>
+ <parameter type-id='type-id-102' name='target'/>
+ <parameter type-id='type-id-110' name='bits'/>
+ <return type-id='type-id-100'/>
+ </function-decl>
+ <function-decl name='atomic_sub_8_nv' mangled-name='atomic_sub_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8_nv'>
+ <parameter type-id='type-id-104' name='target'/>
+ <parameter type-id='type-id-47' name='bits'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <function-decl name='atomic_add_ptr_nv' mangled-name='atomic_add_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr_nv'>
+ <parameter type-id='type-id-95' name='target'/>
+ <parameter type-id='type-id-106' name='bits'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='atomic_add_long_nv' mangled-name='atomic_add_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long_nv'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-30' name='bits'/>
+ <return type-id='type-id-39'/>
+ </function-decl>
+ <function-decl name='atomic_add_32_nv' mangled-name='atomic_add_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32_nv'>
+ <parameter type-id='type-id-97' name='target'/>
+ <parameter type-id='type-id-108' name='bits'/>
+ <return type-id='type-id-56'/>
+ </function-decl>
+ <function-decl name='atomic_add_16_nv' mangled-name='atomic_add_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16_nv'>
+ <parameter type-id='type-id-102' name='target'/>
+ <parameter type-id='type-id-110' name='bits'/>
+ <return type-id='type-id-100'/>
+ </function-decl>
+ <function-decl name='atomic_add_8_nv' mangled-name='atomic_add_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8_nv'>
+ <parameter type-id='type-id-104' name='target'/>
+ <parameter type-id='type-id-47' name='bits'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <function-decl name='atomic_dec_ulong_nv' mangled-name='atomic_dec_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong_nv'>
+ <parameter type-id='type-id-93' name='target'/>
+ <return type-id='type-id-39'/>
+ </function-decl>
+ <function-decl name='atomic_dec_32_nv' mangled-name='atomic_dec_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32_nv'>
+ <parameter type-id='type-id-97' name='target'/>
+ <return type-id='type-id-56'/>
+ </function-decl>
+ <function-decl name='atomic_dec_16_nv' mangled-name='atomic_dec_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16_nv'>
+ <parameter type-id='type-id-102' name='target'/>
+ <return type-id='type-id-100'/>
+ </function-decl>
+ <function-decl name='atomic_dec_8_nv' mangled-name='atomic_dec_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8_nv'>
+ <parameter type-id='type-id-104' name='target'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <function-decl name='atomic_inc_ulong_nv' mangled-name='atomic_inc_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong_nv'>
+ <parameter type-id='type-id-93' name='target'/>
+ <return type-id='type-id-39'/>
+ </function-decl>
+ <function-decl name='atomic_inc_32_nv' mangled-name='atomic_inc_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32_nv'>
+ <parameter type-id='type-id-97' name='target'/>
+ <return type-id='type-id-56'/>
+ </function-decl>
+ <function-decl name='atomic_inc_16_nv' mangled-name='atomic_inc_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16_nv'>
+ <parameter type-id='type-id-102' name='target'/>
+ <return type-id='type-id-100'/>
+ </function-decl>
+ <function-decl name='atomic_inc_8_nv' mangled-name='atomic_inc_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8_nv'>
+ <parameter type-id='type-id-104' name='target'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <function-decl name='atomic_and_ulong' mangled-name='atomic_and_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-39' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_and_32' mangled-name='atomic_and_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32'>
+ <parameter type-id='type-id-97' name='target'/>
+ <parameter type-id='type-id-56' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_and_16' mangled-name='atomic_and_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16'>
+ <parameter type-id='type-id-102' name='target'/>
+ <parameter type-id='type-id-100' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_and_8' mangled-name='atomic_and_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8'>
+ <parameter type-id='type-id-104' name='target'/>
+ <parameter type-id='type-id-12' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_or_ulong' mangled-name='atomic_or_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-39' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_or_32' mangled-name='atomic_or_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32'>
+ <parameter type-id='type-id-97' name='target'/>
+ <parameter type-id='type-id-56' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_or_16' mangled-name='atomic_or_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16'>
+ <parameter type-id='type-id-102' name='target'/>
+ <parameter type-id='type-id-100' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_or_8' mangled-name='atomic_or_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8'>
+ <parameter type-id='type-id-104' name='target'/>
+ <parameter type-id='type-id-12' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_sub_ptr' mangled-name='atomic_sub_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr'>
+ <parameter type-id='type-id-95' name='target'/>
+ <parameter type-id='type-id-106' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_sub_long' mangled-name='atomic_sub_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-30' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_sub_32' mangled-name='atomic_sub_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32'>
+ <parameter type-id='type-id-97' name='target'/>
+ <parameter type-id='type-id-108' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_sub_16' mangled-name='atomic_sub_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16'>
+ <parameter type-id='type-id-102' name='target'/>
+ <parameter type-id='type-id-110' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_sub_8' mangled-name='atomic_sub_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8'>
+ <parameter type-id='type-id-104' name='target'/>
+ <parameter type-id='type-id-47' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_add_ptr' mangled-name='atomic_add_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr'>
+ <parameter type-id='type-id-95' name='target'/>
+ <parameter type-id='type-id-106' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_add_long' mangled-name='atomic_add_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-30' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_add_32' mangled-name='atomic_add_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32'>
+ <parameter type-id='type-id-97' name='target'/>
+ <parameter type-id='type-id-108' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_add_16' mangled-name='atomic_add_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16'>
+ <parameter type-id='type-id-102' name='target'/>
+ <parameter type-id='type-id-110' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_add_8' mangled-name='atomic_add_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8'>
+ <parameter type-id='type-id-104' name='target'/>
+ <parameter type-id='type-id-47' name='bits'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_dec_ulong' mangled-name='atomic_dec_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong'>
+ <parameter type-id='type-id-93' name='target'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_dec_32' mangled-name='atomic_dec_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32'>
+ <parameter type-id='type-id-97' name='target'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_dec_16' mangled-name='atomic_dec_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16'>
+ <parameter type-id='type-id-102' name='target'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_dec_8' mangled-name='atomic_dec_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8'>
+ <parameter type-id='type-id-104' name='target'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_inc_ulong' mangled-name='atomic_inc_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong'>
+ <parameter type-id='type-id-93' name='target'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_inc_32' mangled-name='atomic_inc_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32'>
+ <parameter type-id='type-id-97' name='target'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_inc_16' mangled-name='atomic_inc_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16'>
+ <parameter type-id='type-id-102' name='target'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_inc_8' mangled-name='atomic_inc_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8'>
+ <parameter type-id='type-id-104' name='target'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='atomic_cas_8' mangled-name='atomic_cas_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_8'>
+ <parameter type-id='type-id-104' name='target'/>
+ <parameter type-id='type-id-12' name='exp'/>
+ <parameter type-id='type-id-12' name='des'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <function-decl name='atomic_cas_16' mangled-name='atomic_cas_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_16'>
+ <parameter type-id='type-id-102' name='target'/>
+ <parameter type-id='type-id-100' name='exp'/>
+ <parameter type-id='type-id-100' name='des'/>
+ <return type-id='type-id-100'/>
+ </function-decl>
+ <function-decl name='atomic_cas_32' mangled-name='atomic_cas_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_32'>
+ <parameter type-id='type-id-97' name='target'/>
+ <parameter type-id='type-id-56' name='exp'/>
+ <parameter type-id='type-id-56' name='des'/>
+ <return type-id='type-id-56'/>
+ </function-decl>
+ <function-decl name='atomic_cas_ulong' mangled-name='atomic_cas_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ulong'>
+ <parameter type-id='type-id-93' name='target'/>
+ <parameter type-id='type-id-39' name='exp'/>
+ <parameter type-id='type-id-39' name='des'/>
+ <return type-id='type-id-39'/>
</function-decl>
</abi-instr>
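(For orientation: the atomic.c unit above records the ABI of libspl's Solaris-style atomic_ops interface. A minimal usage sketch in C, assuming the prototypes from libspl's atomic.h that match the declarations serialized above, e.g. atomic_inc_32_nv() and atomic_cas_32():)

#include <atomic.h>	/* libspl atomic_ops-style API (assumed include path) */
#include <stdint.h>

static volatile uint32_t example_refcnt;	/* hypothetical counter */

/* Bump a reference count; the *_nv variants return the new value. */
static uint32_t
example_hold(void)
{
	return (atomic_inc_32_nv(&example_refcnt));
}

/*
 * Claim a one-shot flag: atomic_cas_32() returns the value previously
 * stored, so the caller that sees 0 back is the one that set it.
 */
static int
example_claim(volatile uint32_t *flag)
{
	return (atomic_cas_32(flag, 0, 1) == 0);
}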
- <abi-instr version='1.0' address-size='64' path='getexecname.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='getexecname_impl' filepath='./libspl_impl.h' line='24' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-28'/>
+ <abi-instr version='1.0' address-size='64' path='getexecname.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getexecname'>
+ <return type-id='type-id-4'/>
+ </function-decl>
+ <function-decl name='getexecname_impl' mangled-name='getexecname_impl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='list.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' filepath='../../lib/libspl/include/sys/list_impl.h' line='41' column='1' id='type-id-139'>
+ <abi-instr version='1.0' address-size='64' path='list.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-111'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='list_size' type-id='type-id-4' visibility='default' filepath='../../lib/libspl/include/sys/list_impl.h' line='42' column='1'/>
+ <var-decl name='list_size' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='list_offset' type-id='type-id-4' visibility='default' filepath='../../lib/libspl/include/sys/list_impl.h' line='43' column='1'/>
+ <var-decl name='list_offset' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='list_head' type-id='type-id-140' visibility='default' filepath='../../lib/libspl/include/sys/list_impl.h' line='44' column='1'/>
+ <var-decl name='list_head' type-id='type-id-112' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' filepath='../../lib/libspl/include/sys/list_impl.h' line='36' column='1' id='type-id-140'>
+ <class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-112'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='next' type-id='type-id-141' visibility='default' filepath='../../lib/libspl/include/sys/list_impl.h' line='37' column='1'/>
+ <var-decl name='next' type-id='type-id-113' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='prev' type-id='type-id-141' visibility='default' filepath='../../lib/libspl/include/sys/list_impl.h' line='38' column='1'/>
+ <var-decl name='prev' type-id='type-id-113' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-140' size-in-bits='64' id='type-id-141'/>
- <typedef-decl name='list_t' type-id='type-id-139' filepath='../../lib/libspl/include/sys/list.h' line='36' column='1' id='type-id-142'/>
- <pointer-type-def type-id='type-id-142' size-in-bits='64' id='type-id-143'/>
- <function-decl name='list_create' mangled-name='list_create' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='62' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_create'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='62' column='1'/>
- <parameter type-id='type-id-4' name='size' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='62' column='1'/>
- <parameter type-id='type-id-4' name='offset' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='62' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='list_destroy' mangled-name='list_destroy' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='74' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_destroy'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='74' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='list_insert_after' mangled-name='list_insert_after' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='86' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_after'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='86' column='1'/>
- <parameter type-id='type-id-2' name='object' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='86' column='1'/>
- <parameter type-id='type-id-2' name='nobject' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='86' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='list_insert_head' mangled-name='list_insert_head' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='108' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_head'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='108' column='1'/>
- <parameter type-id='type-id-2' name='object' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='108' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='list_insert_before' mangled-name='list_insert_before' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='97' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_before'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='86' column='1'/>
- <parameter type-id='type-id-2' name='object' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='86' column='1'/>
- <parameter type-id='type-id-2' name='nobject' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='86' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='list_insert_tail' mangled-name='list_insert_tail' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='115' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_tail'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='108' column='1'/>
- <parameter type-id='type-id-2' name='object' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='108' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='list_remove' mangled-name='list_remove' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='122' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='122' column='1'/>
- <parameter type-id='type-id-2' name='object' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='122' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='list_remove_head' mangled-name='list_remove_head' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='131' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_head'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='131' column='1'/>
- <return type-id='type-id-2'/>
+ <pointer-type-def type-id='type-id-112' size-in-bits='64' id='type-id-113'/>
+ <typedef-decl name='list_t' type-id='type-id-111' id='type-id-114'/>
+ <pointer-type-def type-id='type-id-114' size-in-bits='64' id='type-id-115'/>
+ <function-decl name='list_is_empty' mangled-name='list_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_is_empty'>
+ <parameter type-id='type-id-115' name='list'/>
+ <return type-id='type-id-22'/>
+ </function-decl>
+ <typedef-decl name='list_node_t' type-id='type-id-112' id='type-id-116'/>
+ <pointer-type-def type-id='type-id-116' size-in-bits='64' id='type-id-117'/>
+ <function-decl name='list_link_active' mangled-name='list_link_active' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_active'>
+ <parameter type-id='type-id-117' name='ln'/>
+ <return type-id='type-id-22'/>
+ </function-decl>
+ <function-decl name='list_link_init' mangled-name='list_link_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_init'>
+ <parameter type-id='type-id-117' name='ln'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='list_link_replace' mangled-name='list_link_replace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_replace'>
+ <parameter type-id='type-id-117' name='lold'/>
+ <parameter type-id='type-id-117' name='lnew'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='list_move_tail' mangled-name='list_move_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_move_tail'>
+ <parameter type-id='type-id-115' name='dst'/>
+ <parameter type-id='type-id-115' name='src'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='list_prev' mangled-name='list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_prev'>
+ <parameter type-id='type-id-115' name='list'/>
+ <parameter type-id='type-id-6' name='object'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='list_next' mangled-name='list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_next'>
+ <parameter type-id='type-id-115' name='list'/>
+ <parameter type-id='type-id-6' name='object'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='list_tail' mangled-name='list_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_tail'>
+ <parameter type-id='type-id-115' name='list'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='list_head' mangled-name='list_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_head'>
+ <parameter type-id='type-id-115' name='list'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='list_remove_tail' mangled-name='list_remove_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_tail'>
+ <parameter type-id='type-id-115' name='list'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='list_remove_head' mangled-name='list_remove_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_head'>
+ <parameter type-id='type-id-115' name='list'/>
+ <return type-id='type-id-6'/>
+ </function-decl>
+ <function-decl name='list_remove' mangled-name='list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove'>
+ <parameter type-id='type-id-115' name='list'/>
+ <parameter type-id='type-id-6' name='object'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='list_insert_tail' mangled-name='list_insert_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_tail'>
+ <parameter type-id='type-id-115' name='list'/>
+ <parameter type-id='type-id-6' name='object'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='list_insert_head' mangled-name='list_insert_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_head'>
+ <parameter type-id='type-id-115' name='list'/>
+ <parameter type-id='type-id-6' name='object'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='list_insert_before' mangled-name='list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_before'>
+ <parameter type-id='type-id-115' name='list'/>
+ <parameter type-id='type-id-6' name='object'/>
+ <parameter type-id='type-id-6' name='nobject'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='list_insert_after' mangled-name='list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_after'>
+ <parameter type-id='type-id-115' name='list'/>
+ <parameter type-id='type-id-6' name='object'/>
+ <parameter type-id='type-id-6' name='nobject'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='list_destroy' mangled-name='list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_destroy'>
+ <parameter type-id='type-id-115' name='list'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='list_create' mangled-name='list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_create'>
+ <parameter type-id='type-id-115' name='list'/>
+ <parameter type-id='type-id-8' name='size'/>
+ <parameter type-id='type-id-8' name='offset'/>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='list_remove_tail' mangled-name='list_remove_tail' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='141' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_tail'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='131' column='1'/>
- <return type-id='type-id-2'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='mkdirp.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <typedef-decl name='__mode_t' type-id='type-id-31' id='type-id-118'/>
+ <typedef-decl name='mode_t' type-id='type-id-118' id='type-id-119'/>
+ <function-decl name='mkdirp' mangled-name='mkdirp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mkdirp'>
+ <parameter type-id='type-id-4' name='d'/>
+ <parameter type-id='type-id-119' name='mode'/>
+ <return type-id='type-id-22'/>
</function-decl>
- <function-decl name='list_head' mangled-name='list_head' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='151' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_head'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='151' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='__mbstowcs_alias' mangled-name='mbstowcs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='list_tail' mangled-name='list_tail' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='159' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_tail'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='151' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='__wcstombs_alias' mangled-name='wcstombs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='list_next' mangled-name='list_next' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='167' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_next'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='167' column='1'/>
- <parameter type-id='type-id-2' name='object' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='167' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='list_prev' mangled-name='list_prev' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='178' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_prev'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='167' column='1'/>
- <parameter type-id='type-id-2' name='object' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='167' column='1'/>
- <return type-id='type-id-2'/>
+ <function-decl name='strdup' mangled-name='strdup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='list_move_tail' mangled-name='list_move_tail' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='192' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_move_tail'>
- <parameter type-id='type-id-143' name='dst' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='192' column='1'/>
- <parameter type-id='type-id-143' name='src' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='192' column='1'/>
- <return type-id='type-id-1'/>
+ <function-decl name='mkdir' mangled-name='mkdir' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <typedef-decl name='list_node_t' type-id='type-id-140' filepath='../../lib/libspl/include/sys/list.h' line='35' column='1' id='type-id-144'/>
- <pointer-type-def type-id='type-id-144' size-in-bits='64' id='type-id-145'/>
- <function-decl name='list_link_replace' mangled-name='list_link_replace' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='213' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_replace'>
- <parameter type-id='type-id-145' name='lold' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='213' column='1'/>
- <parameter type-id='type-id-145' name='lnew' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='213' column='1'/>
- <return type-id='type-id-1'/>
+ <function-decl name='access' mangled-name='access' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='list_link_init' mangled-name='list_link_init' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='226' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_init'>
- <parameter type-id='type-id-145' name='ln' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='226' column='1'/>
- <return type-id='type-id-1'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='page.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
+ <return type-id='type-id-8'/>
</function-decl>
- <function-decl name='list_link_active' mangled-name='list_link_active' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='233' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_active'>
- <parameter type-id='type-id-145' name='ln' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='233' column='1'/>
- <return type-id='type-id-20'/>
+ <function-decl name='sysconf' mangled-name='sysconf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='list_is_empty' mangled-name='list_is_empty' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='240' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_is_empty'>
- <parameter type-id='type-id-143' name='list' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/list.c' line='240' column='1'/>
- <return type-id='type-id-20'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='strlcat.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcat'>
+ <parameter type-id='type-id-2' name='dst'/>
+ <parameter type-id='type-id-4' name='src'/>
+ <parameter type-id='type-id-8' name='dstsize'/>
+ <return type-id='type-id-8'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='mkdirp.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <typedef-decl name='__mode_t' type-id='type-id-5' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='148' column='1' id='type-id-146'/>
- <typedef-decl name='mode_t' type-id='type-id-146' filepath='/usr/include/x86_64-linux-gnu/sys/types.h' line='69' column='1' id='type-id-147'/>
- <function-decl name='mkdirp' mangled-name='mkdirp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/mkdirp.c' line='50' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mkdirp'>
- <parameter type-id='type-id-9' name='d' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/mkdirp.c' line='50' column='1'/>
- <parameter type-id='type-id-147' name='mode' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/mkdirp.c' line='50' column='1'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='mbstowcs' filepath='/usr/include/stdlib.h' line='930' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-96'/>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-3'/>
- </function-decl>
- <qualified-type-def type-id='type-id-20' const='yes' id='type-id-148'/>
- <pointer-type-def type-id='type-id-148' size-in-bits='64' id='type-id-149'/>
- <function-decl name='wcstombs' filepath='/usr/include/stdlib.h' line='933' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-149'/>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-3'/>
- </function-decl>
- <function-decl name='mkdir' filepath='/usr/include/x86_64-linux-gnu/sys/stat.h' line='317' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='access' filepath='/usr/include/unistd.h' line='287' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-20'/>
- <return type-id='type-id-20'/>
+ <abi-instr version='1.0' address-size='64' path='strlcpy.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcpy'>
+ <parameter type-id='type-id-2' name='dst'/>
+ <parameter type-id='type-id-4' name='src'/>
+ <parameter type-id='type-id-8' name='len'/>
+ <return type-id='type-id-8'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='page.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='spl_pagesize' mangled-name='spl_pagesize' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/page.c' line='28' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
- <return type-id='type-id-4'/>
+ <abi-instr version='1.0' address-size='64' path='timestamp.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='print_timestamp' mangled-name='print_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='print_timestamp'>
+ <parameter type-id='type-id-62' name='timestamp_fmt'/>
+ <return type-id='type-id-5'/>
</function-decl>
- <function-decl name='sysconf' filepath='/usr/include/unistd.h' line='619' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-20'/>
- <return type-id='type-id-28'/>
+ <function-decl name='__builtin_puts' mangled-name='puts' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='strlcat.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='strlcat' mangled-name='strlcat' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/strlcat.c' line='39' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcat'>
- <parameter type-id='type-id-7' name='dst' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/strlcat.c' line='39' column='1'/>
- <parameter type-id='type-id-9' name='src' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/strlcat.c' line='39' column='1'/>
- <parameter type-id='type-id-4' name='dstsize' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/strlcat.c' line='39' column='1'/>
- <return type-id='type-id-4'/>
+ <function-decl name='localtime' mangled-name='localtime' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='strftime' mangled-name='strftime' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='__printf_chk' mangled-name='__printf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='time' mangled-name='time' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='nl_langinfo' mangled-name='nl_langinfo' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='strlcpy.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='strlcpy' mangled-name='strlcpy' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/strlcpy.c' line='39' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcpy'>
- <parameter type-id='type-id-7' name='dst' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/strlcpy.c' line='39' column='1'/>
- <parameter type-id='type-id-9' name='src' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/strlcpy.c' line='39' column='1'/>
- <parameter type-id='type-id-4' name='len' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/strlcpy.c' line='39' column='1'/>
- <return type-id='type-id-4'/>
+ <abi-instr version='1.0' address-size='64' path='os/linux/getexecname.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='__readlink_alias' mangled-name='readlink' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='timestamp.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='print_timestamp' mangled-name='print_timestamp' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/timestamp.c' line='44' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='print_timestamp'>
- <parameter type-id='type-id-71' name='timestamp_fmt' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/timestamp.c' line='44' column='1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-28' size-in-bits='64' id='type-id-150'/>
- <function-decl name='time' filepath='/usr/include/time.h' line='75' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-150'/>
- <return type-id='type-id-28'/>
- </function-decl>
- <function-decl name='nl_langinfo' filepath='/usr/include/langinfo.h' line='661' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-20'/>
+ <abi-instr version='1.0' address-size='64' path='os/linux/gethostid.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
<return type-id='type-id-7'/>
</function-decl>
- <class-decl name='tm' size-in-bits='448' is-struct='yes' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='7' column='1' id='type-id-151'>
+ <function-decl name='__open_alias' mangled-name='open64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='__read_alias' mangled-name='read' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='close' mangled-name='close' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='getenv' mangled-name='getenv' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='strtoull' mangled-name='strtoull' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='fopen' mangled-name='fopen64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='fscanf' mangled-name='fscanf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='fclose' mangled-name='fclose' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/getmntany.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-120'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tm_sec' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='9' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='tm_min' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='10' column='1'/>
+ <var-decl name='mnt_special' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tm_hour' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='11' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='tm_mday' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='12' column='1'/>
+ <var-decl name='mnt_mountp' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='tm_mon' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='13' column='1'/>
+ <var-decl name='mnt_fstype' type-id='type-id-2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='tm_year' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='14' column='1'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='mnt_mntopts' type-id='type-id-2' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='mnt_major' type-id='type-id-62' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='mnt_minor' type-id='type-id-62' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <pointer-type-def type-id='type-id-120' size-in-bits='64' id='type-id-121'/>
+ <class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='type-id-122'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='st_dev' type-id='type-id-123' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='st_ino' type-id='type-id-124' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='st_nlink' type-id='type-id-125' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='tm_wday' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='15' column='1'/>
+ <var-decl name='st_mode' type-id='type-id-118' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='tm_yday' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='16' column='1'/>
+ <var-decl name='st_uid' type-id='type-id-126' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='tm_isdst' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='17' column='1'/>
+ <var-decl name='st_gid' type-id='type-id-127' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='__pad0' type-id='type-id-22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='tm_gmtoff' type-id='type-id-28' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='20' column='1'/>
+ <var-decl name='st_rdev' type-id='type-id-123' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='tm_zone' type-id='type-id-9' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_tm.h' line='21' column='1'/>
+ <var-decl name='st_size' type-id='type-id-128' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='st_blksize' type-id='type-id-129' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='st_blocks' type-id='type-id-130' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='st_atim' type-id='type-id-131' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='st_mtim' type-id='type-id-131' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='st_ctim' type-id='type-id-131' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='960'>
+ <var-decl name='__glibc_reserved' type-id='type-id-132' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-151' size-in-bits='64' id='type-id-152'/>
- <qualified-type-def type-id='type-id-28' const='yes' id='type-id-153'/>
- <pointer-type-def type-id='type-id-153' size-in-bits='64' id='type-id-154'/>
- <function-decl name='localtime' filepath='/usr/include/time.h' line='123' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-154'/>
- <return type-id='type-id-152'/>
- </function-decl>
- <qualified-type-def type-id='type-id-151' const='yes' id='type-id-155'/>
- <pointer-type-def type-id='type-id-155' size-in-bits='64' id='type-id-156'/>
- <function-decl name='strftime' filepath='/usr/include/time.h' line='88' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-3'/>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-156'/>
- <return type-id='type-id-3'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/getexecname.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='readlink' filepath='/usr/include/unistd.h' line='808' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-28'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/gethostid.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='get_system_hostid' mangled-name='get_system_hostid' filepath='os/linux/gethostid.c' line='59' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
- <return type-id='type-id-3'/>
- </function-decl>
- <function-decl name='getenv' filepath='/usr/include/stdlib.h' line='631' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='49' column='1' id='type-id-157'>
+ <typedef-decl name='__dev_t' type-id='type-id-7' id='type-id-123'/>
+ <typedef-decl name='__ino64_t' type-id='type-id-7' id='type-id-124'/>
+ <typedef-decl name='__nlink_t' type-id='type-id-7' id='type-id-125'/>
+ <typedef-decl name='__uid_t' type-id='type-id-31' id='type-id-126'/>
+ <typedef-decl name='__gid_t' type-id='type-id-31' id='type-id-127'/>
+ <typedef-decl name='__off_t' type-id='type-id-30' id='type-id-128'/>
+ <typedef-decl name='__blksize_t' type-id='type-id-30' id='type-id-129'/>
+ <typedef-decl name='__blkcnt64_t' type-id='type-id-30' id='type-id-130'/>
+ <class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-131'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='tv_sec' type-id='type-id-133' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='tv_nsec' type-id='type-id-134' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='__time_t' type-id='type-id-30' id='type-id-133'/>
+ <typedef-decl name='__syscall_slong_t' type-id='type-id-30' id='type-id-134'/>
+
+ <array-type-def dimensions='1' type-id='type-id-134' size-in-bits='192' id='type-id-132'>
+ <subrange length='3' type-id='type-id-7' id='type-id-59'/>
+
+ </array-type-def>
+ <pointer-type-def type-id='type-id-122' size-in-bits='64' id='type-id-135'/>
+ <function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
+ <parameter type-id='type-id-4' name='path'/>
+ <parameter type-id='type-id-121' name='entry'/>
+ <parameter type-id='type-id-135' name='statbuf'/>
+ <return type-id='type-id-22'/>
+ </function-decl>
+ <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-136'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_flags' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='51' column='1'/>
+ <var-decl name='_flags' type-id='type-id-22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_IO_read_ptr' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='54' column='1'/>
+ <var-decl name='_IO_read_ptr' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_IO_read_end' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='55' column='1'/>
+ <var-decl name='_IO_read_end' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='_IO_read_base' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='56' column='1'/>
+ <var-decl name='_IO_read_base' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='_IO_write_base' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='57' column='1'/>
+ <var-decl name='_IO_write_base' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='_IO_write_ptr' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='58' column='1'/>
+ <var-decl name='_IO_write_ptr' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='_IO_write_end' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='59' column='1'/>
+ <var-decl name='_IO_write_end' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='_IO_buf_base' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='60' column='1'/>
+ <var-decl name='_IO_buf_base' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='_IO_buf_end' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='61' column='1'/>
+ <var-decl name='_IO_buf_end' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='_IO_save_base' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='64' column='1'/>
+ <var-decl name='_IO_save_base' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='_IO_backup_base' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='65' column='1'/>
+ <var-decl name='_IO_backup_base' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='_IO_save_end' type-id='type-id-7' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='66' column='1'/>
+ <var-decl name='_IO_save_end' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='_markers' type-id='type-id-158' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='68' column='1'/>
+ <var-decl name='_markers' type-id='type-id-137' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='_chain' type-id='type-id-159' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='70' column='1'/>
+ <var-decl name='_chain' type-id='type-id-138' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='_fileno' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='72' column='1'/>
+ <var-decl name='_fileno' type-id='type-id-22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='_flags2' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='73' column='1'/>
+ <var-decl name='_flags2' type-id='type-id-22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='_old_offset' type-id='type-id-160' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='74' column='1'/>
+ <var-decl name='_old_offset' type-id='type-id-128' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='_cur_column' type-id='type-id-111' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='77' column='1'/>
+ <var-decl name='_cur_column' type-id='type-id-98' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1040'>
- <var-decl name='_vtable_offset' type-id='type-id-49' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='78' column='1'/>
+ <var-decl name='_vtable_offset' type-id='type-id-48' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1048'>
- <var-decl name='_shortbuf' type-id='type-id-161' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='79' column='1'/>
+ <var-decl name='_shortbuf' type-id='type-id-139' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='_offset' type-id='type-id-162' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='89' column='1'/>
+ <var-decl name='_offset' type-id='type-id-140' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='_codecvt' type-id='type-id-163' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='91' column='1'/>
+ <var-decl name='__pad1' type-id='type-id-6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='_wide_data' type-id='type-id-164' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='92' column='1'/>
+ <var-decl name='__pad2' type-id='type-id-6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1344'>
- <var-decl name='_freeres_list' type-id='type-id-159' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='93' column='1'/>
+ <var-decl name='__pad3' type-id='type-id-6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='_freeres_buf' type-id='type-id-2' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='94' column='1'/>
+ <var-decl name='__pad4' type-id='type-id-6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1472'>
- <var-decl name='__pad5' type-id='type-id-4' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='95' column='1'/>
+ <var-decl name='__pad5' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1536'>
- <var-decl name='_mode' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='96' column='1'/>
+ <var-decl name='_mode' type-id='type-id-22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1568'>
- <var-decl name='_unused2' type-id='type-id-165' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_FILE.h' line='98' column='1'/>
+ <var-decl name='_unused2' type-id='type-id-141' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-166'/>
- <pointer-type-def type-id='type-id-166' size-in-bits='64' id='type-id-158'/>
- <pointer-type-def type-id='type-id-157' size-in-bits='64' id='type-id-159'/>
- <typedef-decl name='__off_t' type-id='type-id-28' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='150' column='1' id='type-id-160'/>
-
- <array-type-def dimensions='1' type-id='type-id-6' size-in-bits='8' id='type-id-161'>
- <subrange length='1' type-id='type-id-18' id='type-id-167'/>
-
- </array-type-def>
- <typedef-decl name='__off64_t' type-id='type-id-28' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='151' column='1' id='type-id-162'/>
- <class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-168'/>
- <pointer-type-def type-id='type-id-168' size-in-bits='64' id='type-id-163'/>
- <class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-169'/>
- <pointer-type-def type-id='type-id-169' size-in-bits='64' id='type-id-164'/>
-
- <array-type-def dimensions='1' type-id='type-id-6' size-in-bits='160' id='type-id-165'>
- <subrange length='20' type-id='type-id-18' id='type-id-170'/>
-
- </array-type-def>
- <function-decl name='fclose' filepath='/usr/include/stdio.h' line='213' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-159'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='open' mangled-name='open64' filepath='/usr/include/fcntl.h' line='171' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-9'/>
- <parameter type-id='type-id-20'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='read' filepath='/usr/include/unistd.h' line='360' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-20'/>
- <parameter type-id='type-id-2'/>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-28'/>
- </function-decl>
- <function-decl name='close' filepath='/usr/include/unistd.h' line='353' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-20'/>
- <return type-id='type-id-20'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/getmntany.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <typedef-decl name='FILE' type-id='type-id-157' filepath='/usr/include/x86_64-linux-gnu/bits/types/FILE.h' line='7' column='1' id='type-id-171'/>
- <pointer-type-def type-id='type-id-171' size-in-bits='64' id='type-id-172'/>
- <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='49' column='1' id='type-id-173'>
+ <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-142'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-7' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='50' column='1'/>
+ <var-decl name='_next' type-id='type-id-137' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-7' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='51' column='1'/>
+ <var-decl name='_sbuf' type-id='type-id-138' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-7' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='52' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-7' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='53' column='1'/>
+ <var-decl name='_pos' type-id='type-id-22' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-173' size-in-bits='64' id='type-id-174'/>
- <function-decl name='getmntany' mangled-name='getmntany' filepath='os/linux/getmntany.c' line='51' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getmntany'>
- <parameter type-id='type-id-172' name='fp' filepath='os/linux/getmntany.c' line='51' column='1'/>
- <parameter type-id='type-id-174' name='mgetp' filepath='os/linux/getmntany.c' line='51' column='1'/>
- <parameter type-id='type-id-174' name='mrefp' filepath='os/linux/getmntany.c' line='51' column='1'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <class-decl name='mntent' size-in-bits='320' is-struct='yes' visibility='default' filepath='/usr/include/mntent.h' line='51' column='1' id='type-id-175'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_fsname' type-id='type-id-7' visibility='default' filepath='/usr/include/mntent.h' line='53' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_dir' type-id='type-id-7' visibility='default' filepath='/usr/include/mntent.h' line='54' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_type' type-id='type-id-7' visibility='default' filepath='/usr/include/mntent.h' line='55' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_opts' type-id='type-id-7' visibility='default' filepath='/usr/include/mntent.h' line='56' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='mnt_freq' type-id='type-id-20' visibility='default' filepath='/usr/include/mntent.h' line='57' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='mnt_passno' type-id='type-id-20' visibility='default' filepath='/usr/include/mntent.h' line='58' column='1'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-175' size-in-bits='64' id='type-id-176'/>
- <function-decl name='getmntent_r' filepath='/usr/include/mntent.h' line='73' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-159'/>
- <parameter type-id='type-id-176'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-20'/>
- <return type-id='type-id-176'/>
- </function-decl>
- <function-decl name='feof' filepath='/usr/include/stdio.h' line='765' column='1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-159'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' filepath='os/linux/getmntany.c' line='64' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='_sol_getmntent'>
- <parameter type-id='type-id-172' name='fp' filepath='os/linux/getmntany.c' line='64' column='1'/>
- <parameter type-id='type-id-174' name='mgetp' filepath='os/linux/getmntany.c' line='64' column='1'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='62' column='1' id='type-id-177'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-7' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='63' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-7' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='64' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-7' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='65' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-7' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='66' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='mnt_major' type-id='type-id-71' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='67' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='mnt_minor' type-id='type-id-71' visibility='default' filepath='../../lib/libspl/include/os/linux/sys/mnttab.h' line='68' column='1'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-177' size-in-bits='64' id='type-id-178'/>
- <class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='119' column='1' id='type-id-179'>
+ <pointer-type-def type-id='type-id-142' size-in-bits='64' id='type-id-137'/>
+ <pointer-type-def type-id='type-id-136' size-in-bits='64' id='type-id-138'/>
+
+ <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='8' id='type-id-139'>
+ <subrange length='1' type-id='type-id-7' id='type-id-143'/>
+
+ </array-type-def>
+ <typedef-decl name='__off64_t' type-id='type-id-30' id='type-id-140'/>
+
+ <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='160' id='type-id-141'>
+ <subrange length='20' type-id='type-id-7' id='type-id-144'/>
+
+ </array-type-def>
+ <typedef-decl name='FILE' type-id='type-id-136' id='type-id-145'/>
+ <pointer-type-def type-id='type-id-145' size-in-bits='64' id='type-id-146'/>
+ <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-147'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='st_dev' type-id='type-id-180' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='121' column='1'/>
+ <var-decl name='mnt_special' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='st_ino' type-id='type-id-181' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='123' column='1'/>
+ <var-decl name='mnt_mountp' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='st_nlink' type-id='type-id-182' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='124' column='1'/>
+ <var-decl name='mnt_fstype' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='st_mode' type-id='type-id-146' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='125' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='st_uid' type-id='type-id-183' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='132' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='st_gid' type-id='type-id-184' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='133' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='__pad0' type-id='type-id-20' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='135' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='st_rdev' type-id='type-id-180' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='136' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='st_size' type-id='type-id-160' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='137' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='st_blksize' type-id='type-id-185' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='143' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='st_blocks' type-id='type-id-186' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='144' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='st_atim' type-id='type-id-187' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='152' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='st_mtim' type-id='type-id-187' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='153' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='st_ctim' type-id='type-id-187' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='154' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='__glibc_reserved' type-id='type-id-188' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/stat.h' line='164' column='1'/>
+ <var-decl name='mnt_mntopts' type-id='type-id-2' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__dev_t' type-id='type-id-3' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='143' column='1' id='type-id-180'/>
- <typedef-decl name='__ino64_t' type-id='type-id-3' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='147' column='1' id='type-id-181'/>
- <typedef-decl name='__nlink_t' type-id='type-id-3' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='149' column='1' id='type-id-182'/>
- <typedef-decl name='__uid_t' type-id='type-id-5' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='144' column='1' id='type-id-183'/>
- <typedef-decl name='__gid_t' type-id='type-id-5' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='145' column='1' id='type-id-184'/>
- <typedef-decl name='__blksize_t' type-id='type-id-28' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='172' column='1' id='type-id-185'/>
- <typedef-decl name='__blkcnt64_t' type-id='type-id-28' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='178' column='1' id='type-id-186'/>
- <class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h' line='9' column='1' id='type-id-187'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='type-id-189' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h' line='11' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='type-id-190' visibility='default' filepath='/usr/include/x86_64-linux-gnu/bits/types/struct_timespec.h' line='12' column='1'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__time_t' type-id='type-id-28' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='158' column='1' id='type-id-189'/>
- <typedef-decl name='__syscall_slong_t' type-id='type-id-28' filepath='/usr/include/x86_64-linux-gnu/bits/types.h' line='194' column='1' id='type-id-190'/>
-
- <array-type-def dimensions='1' type-id='type-id-190' size-in-bits='192' id='type-id-188'>
- <subrange length='3' type-id='type-id-18' id='type-id-61'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-179' size-in-bits='64' id='type-id-191'/>
- <function-decl name='getextmntent' mangled-name='getextmntent' filepath='os/linux/getmntany.c' line='106' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
- <parameter type-id='type-id-9' name='path' filepath='os/linux/getmntany.c' line='106' column='1'/>
- <parameter type-id='type-id-178' name='entry' filepath='os/linux/getmntany.c' line='106' column='1'/>
- <parameter type-id='type-id-191' name='statbuf' filepath='os/linux/getmntany.c' line='106' column='1'/>
- <return type-id='type-id-20'/>
+ <pointer-type-def type-id='type-id-147' size-in-bits='64' id='type-id-148'/>
+ <function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getmntany'>
+ <parameter type-id='type-id-146' name='fp'/>
+ <parameter type-id='type-id-148' name='mgetp'/>
+ <parameter type-id='type-id-148' name='mrefp'/>
+ <return type-id='type-id-22'/>
+ </function-decl>
+ <function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='_sol_getmntent'>
+ <parameter type-id='type-id-146' name='fp'/>
+ <parameter type-id='type-id-148' name='mgetp'/>
+ <return type-id='type-id-22'/>
+ </function-decl>
+ <function-decl name='__xstat64' mangled-name='__xstat64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='__builtin_fwrite' mangled-name='fwrite' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='feof' mangled-name='feof' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='getmntent_r' mangled-name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/zone.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <typedef-decl name='zoneid_t' type-id='type-id-20' filepath='../../lib/libspl/include/sys/types.h' line='47' column='1' id='type-id-192'/>
- <function-decl name='getzoneid' mangled-name='getzoneid' filepath='os/linux/zone.c' line='29' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
- <return type-id='type-id-192'/>
+ <abi-instr version='1.0' address-size='64' path='os/linux/zone.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <typedef-decl name='zoneid_t' type-id='type-id-22' id='type-id-149'/>
+ <function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
+ <return type-id='type-id-149'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='assert.c' comp-dir-path='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl' language='LANG_C99'>
- <var-decl name='libspl_assert_ok' type-id='type-id-20' mangled-name='libspl_assert_ok' visibility='default' filepath='/mnt/filling/store/nabijaczleweli/code/zfs/lib/libspl/assert.c' line='28' column='1' elf-symbol-id='libspl_assert_ok'/>
+ <abi-instr version='1.0' address-size='64' path='assert.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <var-decl name='libspl_assert_ok' type-id='type-id-22' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
+ <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
+ <parameter type-id='type-id-4' name='file'/>
+ <parameter type-id='type-id-4' name='func'/>
+ <parameter type-id='type-id-22' name='line'/>
+ <parameter type-id='type-id-4' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ <function-decl name='__builtin_fputc' mangled-name='fputc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='pthread_atfork.c' comp-dir-path='/build/glibc-S9d2JN/glibc-2.27/nptl' language='LANG_C99'>
+ <function-decl name='__register_atfork' mangled-name='__register_atfork' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-5'/>
+ </function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libuutil/uu_pname.c b/sys/contrib/openzfs/lib/libuutil/uu_pname.c
index a6a0f22661e5..28c4a8a9cf7b 100644
--- a/sys/contrib/openzfs/lib/libuutil/uu_pname.c
+++ b/sys/contrib/openzfs/lib/libuutil/uu_pname.c
@@ -1,207 +1,201 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include "libuutil_common.h"
#include <libintl.h>
#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <errno.h>
#include <wchar.h>
#include <unistd.h>
-static const char PNAME_FMT[] = "%s: ";
-static const char ERRNO_FMT[] = ": %s\n";
-
static const char *pname;
static void
uu_die_internal(int status, const char *format, va_list alist) __NORETURN;
int uu_exit_ok_value = EXIT_SUCCESS;
int uu_exit_fatal_value = EXIT_FAILURE;
int uu_exit_usage_value = 2;
int *
uu_exit_ok(void)
{
return (&uu_exit_ok_value);
}
int *
uu_exit_fatal(void)
{
return (&uu_exit_fatal_value);
}
int *
uu_exit_usage(void)
{
return (&uu_exit_usage_value);
}
void
uu_alt_exit(int profile)
{
switch (profile) {
case UU_PROFILE_DEFAULT:
uu_exit_ok_value = EXIT_SUCCESS;
uu_exit_fatal_value = EXIT_FAILURE;
uu_exit_usage_value = 2;
break;
case UU_PROFILE_LAUNCHER:
uu_exit_ok_value = EXIT_SUCCESS;
uu_exit_fatal_value = 124;
uu_exit_usage_value = 125;
break;
}
}
-static void
+static __attribute__((format(printf, 2, 0))) void
uu_warn_internal(int err, const char *format, va_list alist)
{
if (pname != NULL)
- (void) fprintf(stderr, PNAME_FMT, pname);
+ (void) fprintf(stderr, "%s: ", pname);
(void) vfprintf(stderr, format, alist);
if (strrchr(format, '\n') == NULL)
- (void) fprintf(stderr, ERRNO_FMT, strerror(err));
+ (void) fprintf(stderr, ": %s\n", strerror(err));
}
void
uu_vwarn(const char *format, va_list alist)
{
uu_warn_internal(errno, format, alist);
}
-/*PRINTFLIKE1*/
void
uu_warn(const char *format, ...)
{
va_list alist;
va_start(alist, format);
uu_warn_internal(errno, format, alist);
va_end(alist);
}
-static void
+static __attribute__((format(printf, 2, 0))) __NORETURN void
uu_die_internal(int status, const char *format, va_list alist)
{
uu_warn_internal(errno, format, alist);
#ifdef DEBUG
{
char *cp;
if (!issetugid()) {
cp = getenv("UU_DIE_ABORTS");
if (cp != NULL && *cp != '\0')
abort();
}
}
#endif
exit(status);
}
void
uu_vdie(const char *format, va_list alist)
{
uu_die_internal(UU_EXIT_FATAL, format, alist);
}
-/*PRINTFLIKE1*/
void
uu_die(const char *format, ...)
{
va_list alist;
va_start(alist, format);
uu_die_internal(UU_EXIT_FATAL, format, alist);
va_end(alist);
}
void
uu_vxdie(int status, const char *format, va_list alist)
{
uu_die_internal(status, format, alist);
}
-/*PRINTFLIKE2*/
void
uu_xdie(int status, const char *format, ...)
{
va_list alist;
va_start(alist, format);
uu_die_internal(status, format, alist);
va_end(alist);
}
const char *
uu_setpname(char *arg0)
{
/*
* Having a NULL argv[0], while uncommon, is possible. It
* makes more sense to handle this event in uu_setpname rather
* than in each of its consumers.
*/
if (arg0 == NULL) {
pname = getexecname();
if (pname == NULL)
pname = "unknown_command";
return (pname);
}
/*
* Guard against '/' at end of command invocation.
*/
for (;;) {
char *p = strrchr(arg0, '/');
if (p == NULL) {
pname = arg0;
break;
} else {
if (*(p + 1) == '\0') {
*p = '\0';
continue;
}
pname = p + 1;
break;
}
}
return (pname);
}
const char *
uu_getpname(void)
{
return (pname);
}
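For context, below is a minimal sketch of how the uu_pname interfaces diffed above are typically consumed. It is illustrative only and not part of the patch; it assumes <libuutil.h> declares uu_setpname(), uu_warn(), and uu_die() with the behavior shown in uu_pname.c (program-name prefix, and an appended strerror(errno) when the format has no trailing newline).

/*
 * Illustrative sketch (not part of the patch): a minimal libuutil consumer.
 * Assumes <libuutil.h> declares uu_setpname(), uu_warn() and uu_die().
 */
#include <stdio.h>
#include <libuutil.h>

int
main(int argc, char *argv[])
{
	(void) argc;

	/* Record the program name; directories and trailing '/' are stripped. */
	(void) uu_setpname(argv[0]);

	FILE *fp = fopen("/nonexistent", "r");
	if (fp == NULL) {
		/*
		 * Per uu_warn_internal() above, this prints
		 * "<pname>: open failed: <strerror(errno)>\n",
		 * since the format carries no trailing newline.
		 */
		uu_warn("open failed");

		/* Warns the same way, then exits with the fatal status. */
		uu_die("cannot continue");
	}
	(void) fclose(fp);
	return (0);
}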
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs.abi b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
index 9a1d95d96ce9..bb4bde2473bb 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs.abi
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
@@ -1,7767 +1,5415 @@
-<abi-corpus path='libzfs.so' architecture='elf-amd-x86_64' soname='libzfs.so.4'>
+<abi-corpus architecture='elf-amd-x86_64' soname='libzfs.so.4'>
<elf-needed>
<dependency name='libzfs_core.so.3'/>
<dependency name='libnvpair.so.3'/>
<dependency name='libuutil.so.3'/>
<dependency name='libm.so.6'/>
<dependency name='libcrypto.so.1.1'/>
<dependency name='libz.so.1'/>
- <dependency name='libdl.so.2'/>
<dependency name='libpthread.so.0'/>
<dependency name='libc.so.6'/>
</elf-needed>
<elf-function-symbols>
+ <elf-symbol name='_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='bookmark_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='cityhash4' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='color_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='color_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dataset_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dataset_nestcheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='entity_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_incremental_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_incremental_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_impl_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_incremental_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_incremental_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native_varsize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_dataset_depth' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getprop_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='is_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_add_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_envvar_is_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_errno' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_action' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_description' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_free_str_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_cache' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_print_on_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process_get_stdout' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process_get_stdout_nopath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mountpoint_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='permset_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='pool_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='printf_color' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_commit_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_disable_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_enable_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_errorstr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_validate_shareopts' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='snapshot_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_depends_on' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_valid_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_lookup_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_lookup_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_adjust_mount_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_allocatable_devs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_bookmark_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_clone' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_close' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_all_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_nfs_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_smb_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_component_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_create_ancestors' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_attempt_load_keys' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_clone_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_get_encryption_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_load_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_rewrap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_unload_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dataset_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dataset_name_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_canonicalize_perm' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_verify_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_whokey' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_foreach_mountpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_all_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_clones_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_fsacl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_holds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_pool_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_pool_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_recvd_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_underlying_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_user_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_handle_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_hold' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_hold_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_ioctl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_children' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_dependents' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_filesystems' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots_sorted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapspec' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mod_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_at' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_delegation_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_name_valid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicestrtonum' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parent_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parse_mount_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_path_to_zhandle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_promote' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_delegatable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_encryption_key_param' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_recvd' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_userquota' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_userquota_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_written' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_written_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_inherit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_inheritable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_is_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_setonce' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_user' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_userquota' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_valid_for_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_valid_keylocation' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_visible' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_written' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prune_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_receive' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_refresh_properties' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_rollback' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_save_arguments' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_progress' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_resume_token_to_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_saved' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_set_fsacl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_shareall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_show_diffs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_purge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_snapshot' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_snapshot_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_spa_version' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_spa_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_special_devs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_standard_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_type_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unmount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unmountall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_bypath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_bytype' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userspace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_valid_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_kernel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_userland' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_zpl_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_close' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_disable_datasets' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_discard_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_enable_datasets' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_seek' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_explain_recover' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_export' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_export_force' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_feature_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev_by_physpath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_free_handles' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_config' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_errlog' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_features' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_history' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_load_policy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_physpath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_prop_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_state' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_state_str' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_in_use' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_initialize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_initialize_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_is_draid_spare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_label_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_load_compat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_log_history' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_obj_to_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_obj_to_path_ds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open_canfail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_pool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_print_unsup_feat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_feature' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_feature' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_setonce' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_unsupported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_props_refresh' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_refresh_stats' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reguid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reopen_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_scan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_skip_pool' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_sync_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_trim' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_upgrade' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_attach' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_degrade' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_detach' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_fault' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_indirect_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_offline' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_online' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_path_to_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove_cancel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_split' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_free_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_get_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter_common' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_print_one_property' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_impl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_number' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_valid_for_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_width' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zvol_volsize_to_reservation' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='fletcher_4_abd_ops' size='24' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx2_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx512bw_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx512f_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_sse2_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_ssse3_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_superscalar4_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_superscalar_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_config_ops' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spa_feature_table' size='1904' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_checks_disable' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_perm_tab' size='512' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_history_event_names' size='328' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_max_dataset_nesting' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userquota_prop_prefixes' size='96' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
- <abi-instr version='1.0' address-size='64' path='libzfs_changelist.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='uu_avl_walk' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-1'/>
- <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-2'/>
- <class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-3'/>
- <pointer-type-def type-id='type-id-3' size-in-bits='64' id='type-id-4'/>
- <type-decl name='unsigned int' size-in-bits='32' id='type-id-5'/>
- <function-decl name='uu_avl_walk_start' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-4'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-2'/>
+ <abi-instr version='1.0' address-size='64' path='libzfs_changelist.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <type-decl name='void' id='type-id-1'/>
+ <function-decl name='zfs_alloc' mangled-name='zfs_alloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <type-decl name='void' id='type-id-6'/>
- <pointer-type-def type-id='type-id-6' size-in-bits='64' id='type-id-7'/>
- <function-decl name='uu_avl_walk_next' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-2'/>
- <return type-id='type-id-7'/>
+ <function-decl name='uu_avl_pool_create' mangled-name='uu_avl_pool_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <type-decl name='int' size-in-bits='32' id='type-id-8'/>
- <function-decl name='getzoneid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-8'/>
+ <function-decl name='uu_avl_create' mangled-name='uu_avl_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='zfs_handle' size-in-bits='4928' is-struct='yes' visibility='default' id='type-id-9'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zfs_hdl' type-id='type-id-10' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zpool_hdl' type-id='type-id-11' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zfs_name' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2176'>
- <var-decl name='zfs_type' type-id='type-id-13' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2208'>
- <var-decl name='zfs_head_type' type-id='type-id-13' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2240'>
- <var-decl name='zfs_dmustats' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='4544'>
- <var-decl name='zfs_props' type-id='type-id-15' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='4608'>
- <var-decl name='zfs_user_props' type-id='type-id-15' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='4672'>
- <var-decl name='zfs_recvd_props' type-id='type-id-15' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='4736'>
- <var-decl name='zfs_mntcheck' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='4800'>
- <var-decl name='zfs_mntopts' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='4864'>
- <var-decl name='zfs_props_table' type-id='type-id-18' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='libzfs_handle' size-in-bits='18240' is-struct='yes' visibility='default' id='type-id-19'>
+ <function-decl name='zfs_prop_get' mangled-name='zfs_prop_get' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_get_name' mangled-name='zfs_get_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_open' mangled-name='zfs_open' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_is_shared' mangled-name='zfs_is_shared' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_prop_get_int' mangled-name='zfs_prop_get_int' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uu_avl_node_init' mangled-name='uu_avl_node_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uu_avl_find' mangled-name='uu_avl_find' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='free' mangled-name='free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_close' mangled-name='zfs_close' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_iter_mounted' mangled-name='zfs_iter_mounted' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_is_mounted' mangled-name='zfs_is_mounted' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uu_avl_insert' mangled-name='uu_avl_insert' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_error' mangled-name='zfs_error' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__stack_chk_fail' mangled-name='__stack_chk_fail' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='strcmp' mangled-name='strcmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_get_handle' mangled-name='zfs_get_handle' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uu_avl_walk_start' mangled-name='uu_avl_walk_start' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uu_avl_remove' mangled-name='uu_avl_remove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uu_avl_walk_next' mangled-name='uu_avl_walk_next' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uu_avl_walk_end' mangled-name='uu_avl_walk_end' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uu_avl_destroy' mangled-name='uu_avl_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uu_avl_pool_destroy' mangled-name='uu_avl_pool_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_unshare_proto' mangled-name='zfs_unshare_proto' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_commit_proto' mangled-name='zfs_commit_proto' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='strlen' mangled-name='strlen' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='strncmp' mangled-name='strncmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='remove_mountpoint' mangled-name='remove_mountpoint' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_refresh_properties' mangled-name='zfs_refresh_properties' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_share_nfs' mangled-name='zfs_share_nfs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_unshare_smb' mangled-name='zfs_unshare_smb' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_unshare_nfs' mangled-name='zfs_unshare_nfs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_share_smb' mangled-name='zfs_share_smb' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_mount' mangled-name='zfs_mount' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uu_avl_last' mangled-name='uu_avl_last' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_commit_smb_shares' mangled-name='zfs_commit_smb_shares' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_commit_nfs_shares' mangled-name='zfs_commit_nfs_shares' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_unmount' mangled-name='zfs_unmount' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_config.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <type-decl name='int' size-in-bits='32' id='type-id-2'/>
+ <class-decl name='libzfs_handle' size-in-bits='18240' is-struct='yes' visibility='default' id='type-id-3'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='libzfs_error' type-id='type-id-8' visibility='default'/>
+ <var-decl name='libzfs_error' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='libzfs_fd' type-id='type-id-8' visibility='default'/>
+ <var-decl name='libzfs_fd' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='libzfs_pool_handles' type-id='type-id-11' visibility='default'/>
+ <var-decl name='libzfs_pool_handles' type-id='type-id-4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='libzfs_ns_avlpool' type-id='type-id-20' visibility='default'/>
+ <var-decl name='libzfs_ns_avlpool' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='libzfs_ns_avl' type-id='type-id-21' visibility='default'/>
+ <var-decl name='libzfs_ns_avl' type-id='type-id-6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='libzfs_ns_gen' type-id='type-id-22' visibility='default'/>
+ <var-decl name='libzfs_ns_gen' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='libzfs_desc_active' type-id='type-id-8' visibility='default'/>
+ <var-decl name='libzfs_desc_active' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='libzfs_action' type-id='type-id-23' visibility='default'/>
+ <var-decl name='libzfs_action' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8544'>
- <var-decl name='libzfs_desc' type-id='type-id-23' visibility='default'/>
+ <var-decl name='libzfs_desc' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16736'>
- <var-decl name='libzfs_printerr' type-id='type-id-8' visibility='default'/>
+ <var-decl name='libzfs_printerr' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16768'>
- <var-decl name='libzfs_mnttab_enable' type-id='type-id-16' visibility='default'/>
+ <var-decl name='libzfs_mnttab_enable' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16832'>
- <var-decl name='libzfs_mnttab_cache_lock' type-id='type-id-24' visibility='default'/>
+ <var-decl name='libzfs_mnttab_cache_lock' type-id='type-id-10' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17152'>
- <var-decl name='libzfs_mnttab_cache' type-id='type-id-25' visibility='default'/>
+ <var-decl name='libzfs_mnttab_cache' type-id='type-id-11' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17472'>
- <var-decl name='libzfs_pool_iter' type-id='type-id-8' visibility='default'/>
+ <var-decl name='libzfs_pool_iter' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17504'>
- <var-decl name='libzfs_prop_debug' type-id='type-id-16' visibility='default'/>
+ <var-decl name='libzfs_prop_debug' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17536'>
- <var-decl name='libzfs_urire' type-id='type-id-26' visibility='default'/>
+ <var-decl name='libzfs_urire' type-id='type-id-12' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='18048'>
- <var-decl name='libzfs_max_nvlist' type-id='type-id-22' visibility='default'/>
+ <var-decl name='libzfs_max_nvlist' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='18112'>
- <var-decl name='libfetch' type-id='type-id-7' visibility='default'/>
+ <var-decl name='libfetch' type-id='type-id-13' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='18176'>
- <var-decl name='libfetch_load_error' type-id='type-id-17' visibility='default'/>
+ <var-decl name='libfetch_load_error' type-id='type-id-14' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='zpool_handle' size-in-bits='2560' is-struct='yes' visibility='default' id='type-id-27'>
+ <class-decl name='zpool_handle' size-in-bits='2560' is-struct='yes' visibility='default' id='type-id-15'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zpool_hdl' type-id='type-id-10' visibility='default'/>
+ <var-decl name='zpool_hdl' type-id='type-id-16' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zpool_next' type-id='type-id-11' visibility='default'/>
+ <var-decl name='zpool_next' type-id='type-id-4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zpool_name' type-id='type-id-12' visibility='default'/>
+ <var-decl name='zpool_name' type-id='type-id-17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
- <var-decl name='zpool_state' type-id='type-id-8' visibility='default'/>
+ <var-decl name='zpool_state' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
- <var-decl name='zpool_config_size' type-id='type-id-28' visibility='default'/>
+ <var-decl name='zpool_config_size' type-id='type-id-18' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
- <var-decl name='zpool_config' type-id='type-id-15' visibility='default'/>
+ <var-decl name='zpool_config' type-id='type-id-19' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2368'>
- <var-decl name='zpool_old_config' type-id='type-id-15' visibility='default'/>
+ <var-decl name='zpool_old_config' type-id='type-id-19' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
- <var-decl name='zpool_props' type-id='type-id-15' visibility='default'/>
+ <var-decl name='zpool_props' type-id='type-id-19' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2496'>
- <var-decl name='zpool_start_block' type-id='type-id-29' visibility='default'/>
+ <var-decl name='zpool_start_block' type-id='type-id-20' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='libzfs_handle_t' type-id='type-id-19' id='type-id-30'/>
- <pointer-type-def type-id='type-id-30' size-in-bits='64' id='type-id-10'/>
- <typedef-decl name='zpool_handle_t' type-id='type-id-27' id='type-id-31'/>
- <pointer-type-def type-id='type-id-31' size-in-bits='64' id='type-id-11'/>
- <type-decl name='char' size-in-bits='8' id='type-id-32'/>
- <type-decl name='__ARRAY_SIZE_TYPE__' size-in-bits='64' id='type-id-33'/>
+ <typedef-decl name='libzfs_handle_t' type-id='type-id-3' id='type-id-21'/>
+ <pointer-type-def type-id='type-id-21' size-in-bits='64' id='type-id-16'/>
+ <typedef-decl name='zpool_handle_t' type-id='type-id-15' id='type-id-22'/>
+ <pointer-type-def type-id='type-id-22' size-in-bits='64' id='type-id-4'/>
+ <type-decl name='char' size-in-bits='8' id='type-id-23'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='type-id-24'/>
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='2048' id='type-id-12'>
- <subrange length='256' type-id='type-id-33' id='type-id-34'/>
+ <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='2048' id='type-id-17'>
+ <subrange length='256' type-id='type-id-24' id='type-id-25'/>
</array-type-def>
- <type-decl name='unsigned long int' size-in-bits='64' id='type-id-35'/>
- <typedef-decl name='size_t' type-id='type-id-35' id='type-id-28'/>
- <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-36'>
+ <typedef-decl name='size_t' type-id='type-id-24' id='type-id-18'/>
+ <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-26'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvl_version' type-id='type-id-37' visibility='default'/>
+ <var-decl name='nvl_version' type-id='type-id-27' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='nvl_nvflag' type-id='type-id-38' visibility='default'/>
+ <var-decl name='nvl_nvflag' type-id='type-id-28' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvl_priv' type-id='type-id-22' visibility='default'/>
+ <var-decl name='nvl_priv' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='nvl_flag' type-id='type-id-38' visibility='default'/>
+ <var-decl name='nvl_flag' type-id='type-id-28' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='nvl_pad' type-id='type-id-37' visibility='default'/>
+ <var-decl name='nvl_pad' type-id='type-id-27' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__int32_t' type-id='type-id-8' id='type-id-39'/>
- <typedef-decl name='int32_t' type-id='type-id-39' id='type-id-37'/>
- <typedef-decl name='__uint32_t' type-id='type-id-5' id='type-id-40'/>
- <typedef-decl name='uint32_t' type-id='type-id-40' id='type-id-38'/>
- <typedef-decl name='__uint64_t' type-id='type-id-35' id='type-id-41'/>
- <typedef-decl name='uint64_t' type-id='type-id-41' id='type-id-22'/>
- <typedef-decl name='nvlist_t' type-id='type-id-36' id='type-id-42'/>
- <pointer-type-def type-id='type-id-42' size-in-bits='64' id='type-id-15'/>
- <type-decl name='long long int' size-in-bits='64' id='type-id-43'/>
- <typedef-decl name='longlong_t' type-id='type-id-43' id='type-id-44'/>
- <typedef-decl name='diskaddr_t' type-id='type-id-44' id='type-id-29'/>
- <class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-45'/>
- <typedef-decl name='uu_avl_pool_t' type-id='type-id-45' id='type-id-46'/>
- <pointer-type-def type-id='type-id-46' size-in-bits='64' id='type-id-20'/>
- <typedef-decl name='uu_avl_t' type-id='type-id-3' id='type-id-47'/>
- <pointer-type-def type-id='type-id-47' size-in-bits='64' id='type-id-21'/>
+ <typedef-decl name='__int32_t' type-id='type-id-2' id='type-id-29'/>
+ <typedef-decl name='int32_t' type-id='type-id-29' id='type-id-27'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='type-id-30'/>
+ <typedef-decl name='__uint32_t' type-id='type-id-30' id='type-id-31'/>
+ <typedef-decl name='uint32_t' type-id='type-id-31' id='type-id-28'/>
+ <typedef-decl name='__uint64_t' type-id='type-id-24' id='type-id-32'/>
+ <typedef-decl name='uint64_t' type-id='type-id-32' id='type-id-7'/>
+ <typedef-decl name='nvlist_t' type-id='type-id-26' id='type-id-33'/>
+ <pointer-type-def type-id='type-id-33' size-in-bits='64' id='type-id-19'/>
+ <type-decl name='long long int' size-in-bits='64' id='type-id-34'/>
+ <typedef-decl name='longlong_t' type-id='type-id-34' id='type-id-35'/>
+ <typedef-decl name='diskaddr_t' type-id='type-id-35' id='type-id-20'/>
+ <class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-36'/>
+ <typedef-decl name='uu_avl_pool_t' type-id='type-id-36' id='type-id-37'/>
+ <pointer-type-def type-id='type-id-37' size-in-bits='64' id='type-id-5'/>
+ <class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-38'/>
+ <typedef-decl name='uu_avl_t' type-id='type-id-38' id='type-id-39'/>
+ <pointer-type-def type-id='type-id-39' size-in-bits='64' id='type-id-6'/>
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='8192' id='type-id-23'>
- <subrange length='1024' type-id='type-id-33' id='type-id-48'/>
+ <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='8192' id='type-id-8'>
+ <subrange length='1024' type-id='type-id-24' id='type-id-40'/>
</array-type-def>
- <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-49'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-50'>
- <underlying-type type-id='type-id-49'/>
+ <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-41'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-42'>
+ <underlying-type type-id='type-id-41'/>
<enumerator name='B_FALSE' value='0'/>
<enumerator name='B_TRUE' value='1'/>
</enum-decl>
- <typedef-decl name='boolean_t' type-id='type-id-50' id='type-id-16'/>
- <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='type-id-51'>
+ <typedef-decl name='boolean_t' type-id='type-id-42' id='type-id-9'/>
+ <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='type-id-43'>
<data-member access='private'>
- <var-decl name='__data' type-id='type-id-52' visibility='default'/>
+ <var-decl name='__data' type-id='type-id-44' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__size' type-id='type-id-53' visibility='default'/>
+ <var-decl name='__size' type-id='type-id-45' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__align' type-id='type-id-54' visibility='default'/>
+ <var-decl name='__align' type-id='type-id-46' visibility='default'/>
</data-member>
</union-decl>
- <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-52'>
+ <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-44'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__lock' type-id='type-id-8' visibility='default'/>
+ <var-decl name='__lock' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='__count' type-id='type-id-5' visibility='default'/>
+ <var-decl name='__count' type-id='type-id-30' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__owner' type-id='type-id-8' visibility='default'/>
+ <var-decl name='__owner' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__nusers' type-id='type-id-5' visibility='default'/>
+ <var-decl name='__nusers' type-id='type-id-30' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='__kind' type-id='type-id-8' visibility='default'/>
+ <var-decl name='__kind' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='__spins' type-id='type-id-55' visibility='default'/>
+ <var-decl name='__spins' type-id='type-id-47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='176'>
- <var-decl name='__elision' type-id='type-id-55' visibility='default'/>
+ <var-decl name='__elision' type-id='type-id-47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='__list' type-id='type-id-56' visibility='default'/>
+ <var-decl name='__list' type-id='type-id-48' visibility='default'/>
</data-member>
</class-decl>
- <type-decl name='short int' size-in-bits='16' id='type-id-55'/>
- <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-57'>
+ <type-decl name='short int' size-in-bits='16' id='type-id-47'/>
+ <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-49'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__prev' type-id='type-id-58' visibility='default'/>
+ <var-decl name='__prev' type-id='type-id-50' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__next' type-id='type-id-58' visibility='default'/>
+ <var-decl name='__next' type-id='type-id-50' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-57' size-in-bits='64' id='type-id-58'/>
- <typedef-decl name='__pthread_list_t' type-id='type-id-57' id='type-id-56'/>
+ <pointer-type-def type-id='type-id-49' size-in-bits='64' id='type-id-50'/>
+ <typedef-decl name='__pthread_list_t' type-id='type-id-49' id='type-id-48'/>
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='320' id='type-id-53'>
- <subrange length='40' type-id='type-id-33' id='type-id-59'/>
+ <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='320' id='type-id-45'>
+ <subrange length='40' type-id='type-id-24' id='type-id-51'/>
</array-type-def>
- <type-decl name='long int' size-in-bits='64' id='type-id-54'/>
- <typedef-decl name='pthread_mutex_t' type-id='type-id-51' id='type-id-24'/>
- <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-60'>
+ <type-decl name='long int' size-in-bits='64' id='type-id-46'/>
+ <typedef-decl name='pthread_mutex_t' type-id='type-id-43' id='type-id-10'/>
+ <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-52'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_root' type-id='type-id-61' visibility='default'/>
+ <var-decl name='avl_root' type-id='type-id-53' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='avl_compar' type-id='type-id-62' visibility='default'/>
+ <var-decl name='avl_compar' type-id='type-id-54' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_offset' type-id='type-id-28' visibility='default'/>
+ <var-decl name='avl_offset' type-id='type-id-18' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='avl_numnodes' type-id='type-id-63' visibility='default'/>
+ <var-decl name='avl_numnodes' type-id='type-id-55' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='avl_size' type-id='type-id-28' visibility='default'/>
+ <var-decl name='avl_pad' type-id='type-id-18' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-64'>
+ <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-56'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_child' type-id='type-id-65' visibility='default'/>
+ <var-decl name='avl_child' type-id='type-id-57' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_pcb' type-id='type-id-66' visibility='default'/>
+ <var-decl name='avl_pcb' type-id='type-id-58' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-64' size-in-bits='64' id='type-id-61'/>
+ <pointer-type-def type-id='type-id-56' size-in-bits='64' id='type-id-53'/>
- <array-type-def dimensions='1' type-id='type-id-61' size-in-bits='128' id='type-id-65'>
- <subrange length='2' type-id='type-id-33' id='type-id-67'/>
+ <array-type-def dimensions='1' type-id='type-id-53' size-in-bits='128' id='type-id-57'>
+ <subrange length='2' type-id='type-id-24' id='type-id-59'/>
</array-type-def>
- <typedef-decl name='uintptr_t' type-id='type-id-35' id='type-id-66'/>
- <pointer-type-def type-id='type-id-68' size-in-bits='64' id='type-id-62'/>
- <typedef-decl name='ulong_t' type-id='type-id-35' id='type-id-63'/>
- <typedef-decl name='avl_tree_t' type-id='type-id-60' id='type-id-25'/>
- <class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-69'>
+ <typedef-decl name='uintptr_t' type-id='type-id-24' id='type-id-58'/>
+ <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-13'/>
+ <pointer-type-def type-id='type-id-60' size-in-bits='64' id='type-id-54'/>
+ <typedef-decl name='ulong_t' type-id='type-id-24' id='type-id-55'/>
+ <typedef-decl name='avl_tree_t' type-id='type-id-52' id='type-id-11'/>
+ <class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-61'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='buffer' type-id='type-id-70' visibility='default'/>
+ <var-decl name='buffer' type-id='type-id-62' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='allocated' type-id='type-id-71' visibility='default'/>
+ <var-decl name='allocated' type-id='type-id-24' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='used' type-id='type-id-71' visibility='default'/>
+ <var-decl name='used' type-id='type-id-24' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='syntax' type-id='type-id-72' visibility='default'/>
+ <var-decl name='syntax' type-id='type-id-63' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='fastmap' type-id='type-id-17' visibility='default'/>
+ <var-decl name='fastmap' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='translate' type-id='type-id-73' visibility='default'/>
+ <var-decl name='translate' type-id='type-id-62' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='re_nsub' type-id='type-id-28' visibility='default'/>
+ <var-decl name='re_nsub' type-id='type-id-18' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='31'>
- <var-decl name='can_be_null' type-id='type-id-5' visibility='default'/>
+ <var-decl name='can_be_null' type-id='type-id-30' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='29'>
- <var-decl name='regs_allocated' type-id='type-id-5' visibility='default'/>
+ <var-decl name='regs_allocated' type-id='type-id-30' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='28'>
- <var-decl name='fastmap_accurate' type-id='type-id-5' visibility='default'/>
+ <var-decl name='fastmap_accurate' type-id='type-id-30' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='27'>
- <var-decl name='no_sub' type-id='type-id-5' visibility='default'/>
+ <var-decl name='no_sub' type-id='type-id-30' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='26'>
- <var-decl name='not_bol' type-id='type-id-5' visibility='default'/>
+ <var-decl name='not_bol' type-id='type-id-30' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='25'>
- <var-decl name='not_eol' type-id='type-id-5' visibility='default'/>
+ <var-decl name='not_eol' type-id='type-id-30' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='24'>
- <var-decl name='newline_anchor' type-id='type-id-5' visibility='default'/>
+ <var-decl name='newline_anchor' type-id='type-id-30' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <type-decl name='unsigned char' size-in-bits='8' id='type-id-64'/>
+ <pointer-type-def type-id='type-id-64' size-in-bits='64' id='type-id-62'/>
+ <typedef-decl name='reg_syntax_t' type-id='type-id-24' id='type-id-63'/>
+ <pointer-type-def type-id='type-id-23' size-in-bits='64' id='type-id-14'/>
+ <typedef-decl name='regex_t' type-id='type-id-61' id='type-id-12'/>
+ <class-decl name='zfs_handle' size-in-bits='4928' is-struct='yes' visibility='default' id='type-id-65'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='zfs_hdl' type-id='type-id-16' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='zpool_hdl' type-id='type-id-4' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='zfs_name' type-id='type-id-17' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2176'>
+ <var-decl name='zfs_type' type-id='type-id-66' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2208'>
+ <var-decl name='zfs_head_type' type-id='type-id-66' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2240'>
+ <var-decl name='zfs_dmustats' type-id='type-id-67' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4544'>
+ <var-decl name='zfs_props' type-id='type-id-19' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4608'>
+ <var-decl name='zfs_user_props' type-id='type-id-19' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4672'>
+ <var-decl name='zfs_recvd_props' type-id='type-id-19' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4736'>
+ <var-decl name='zfs_mntcheck' type-id='type-id-9' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4800'>
+ <var-decl name='zfs_mntopts' type-id='type-id-14' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4864'>
+ <var-decl name='zfs_props_table' type-id='type-id-68' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='re_dfa_t' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-74'/>
- <pointer-type-def type-id='type-id-74' size-in-bits='64' id='type-id-70'/>
- <typedef-decl name='__re_long_size_t' type-id='type-id-35' id='type-id-71'/>
- <typedef-decl name='reg_syntax_t' type-id='type-id-35' id='type-id-72'/>
- <pointer-type-def type-id='type-id-32' size-in-bits='64' id='type-id-17'/>
- <type-decl name='unsigned char' size-in-bits='8' id='type-id-75'/>
- <pointer-type-def type-id='type-id-75' size-in-bits='64' id='type-id-73'/>
- <typedef-decl name='regex_t' type-id='type-id-69' id='type-id-26'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-76'>
- <underlying-type type-id='type-id-49'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-69'>
+ <underlying-type type-id='type-id-41'/>
<enumerator name='ZFS_TYPE_FILESYSTEM' value='1'/>
<enumerator name='ZFS_TYPE_SNAPSHOT' value='2'/>
<enumerator name='ZFS_TYPE_VOLUME' value='4'/>
<enumerator name='ZFS_TYPE_POOL' value='8'/>
<enumerator name='ZFS_TYPE_BOOKMARK' value='16'/>
</enum-decl>
- <typedef-decl name='zfs_type_t' type-id='type-id-76' id='type-id-13'/>
- <class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='type-id-77'>
+ <typedef-decl name='zfs_type_t' type-id='type-id-69' id='type-id-66'/>
+ <class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='type-id-70'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='dds_num_clones' type-id='type-id-22' visibility='default'/>
+ <var-decl name='dds_num_clones' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='dds_creation_txg' type-id='type-id-22' visibility='default'/>
+ <var-decl name='dds_creation_txg' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='dds_guid' type-id='type-id-22' visibility='default'/>
+ <var-decl name='dds_guid' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='dds_type' type-id='type-id-78' visibility='default'/>
+ <var-decl name='dds_type' type-id='type-id-71' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='dds_is_snapshot' type-id='type-id-79' visibility='default'/>
+ <var-decl name='dds_is_snapshot' type-id='type-id-72' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='232'>
- <var-decl name='dds_inconsistent' type-id='type-id-79' visibility='default'/>
+ <var-decl name='dds_inconsistent' type-id='type-id-72' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='240'>
- <var-decl name='dds_redacted' type-id='type-id-79' visibility='default'/>
+ <var-decl name='dds_redacted' type-id='type-id-72' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='248'>
- <var-decl name='dds_origin' type-id='type-id-12' visibility='default'/>
+ <var-decl name='dds_origin' type-id='type-id-17' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='dmu_objset_type' id='type-id-80'>
- <underlying-type type-id='type-id-49'/>
+ <enum-decl name='dmu_objset_type' id='type-id-73'>
+ <underlying-type type-id='type-id-41'/>
<enumerator name='DMU_OST_NONE' value='0'/>
<enumerator name='DMU_OST_META' value='1'/>
<enumerator name='DMU_OST_ZFS' value='2'/>
<enumerator name='DMU_OST_ZVOL' value='3'/>
<enumerator name='DMU_OST_OTHER' value='4'/>
<enumerator name='DMU_OST_ANY' value='5'/>
<enumerator name='DMU_OST_NUMTYPES' value='6'/>
</enum-decl>
- <typedef-decl name='dmu_objset_type_t' type-id='type-id-80' id='type-id-78'/>
- <typedef-decl name='__uint8_t' type-id='type-id-75' id='type-id-81'/>
- <typedef-decl name='uint8_t' type-id='type-id-81' id='type-id-79'/>
- <typedef-decl name='dmu_objset_stats_t' type-id='type-id-77' id='type-id-14'/>
- <pointer-type-def type-id='type-id-79' size-in-bits='64' id='type-id-18'/>
- <pointer-type-def type-id='type-id-9' size-in-bits='64' id='type-id-82'/>
- <qualified-type-def type-id='type-id-32' const='yes' id='type-id-83'/>
- <pointer-type-def type-id='type-id-83' size-in-bits='64' id='type-id-84'/>
- <function-decl name='zfs_unmount' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <typedef-decl name='dmu_objset_type_t' type-id='type-id-73' id='type-id-71'/>
+ <typedef-decl name='__uint8_t' type-id='type-id-64' id='type-id-74'/>
+ <typedef-decl name='uint8_t' type-id='type-id-74' id='type-id-72'/>
+ <typedef-decl name='dmu_objset_stats_t' type-id='type-id-70' id='type-id-67'/>
+ <pointer-type-def type-id='type-id-72' size-in-bits='64' id='type-id-68'/>
+ <typedef-decl name='zfs_handle_t' type-id='type-id-65' id='type-id-75'/>
+ <pointer-type-def type-id='type-id-75' size-in-bits='64' id='type-id-76'/>
+ <pointer-type-def type-id='type-id-77' size-in-bits='64' id='type-id-78'/>
+ <typedef-decl name='zfs_iter_f' type-id='type-id-78' id='type-id-79'/>
+ <function-decl name='zfs_iter_root' mangled-name='zfs_iter_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_root'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-79' name='func'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_unshare_smb' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <pointer-type-def type-id='type-id-80' size-in-bits='64' id='type-id-81'/>
+ <typedef-decl name='zpool_iter_f' type-id='type-id-81' id='type-id-82'/>
+ <function-decl name='zpool_iter' mangled-name='zpool_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_iter'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-82' name='func'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_commit_smb_shares' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <qualified-type-def type-id='type-id-23' const='yes' id='type-id-83'/>
+ <pointer-type-def type-id='type-id-83' size-in-bits='64' id='type-id-84'/>
+ <function-decl name='zpool_skip_pool' mangled-name='zpool_skip_pool' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_skip_pool'>
+ <parameter type-id='type-id-84' name='poolname'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='uu_avl_walk_end' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-2'/>
- <return type-id='type-id-6'/>
+ <pointer-type-def type-id='type-id-9' size-in-bits='64' id='type-id-85'/>
+ <function-decl name='zpool_refresh_stats' mangled-name='zpool_refresh_stats' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_refresh_stats'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-85' name='missing'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='uu_avl_last' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-4'/>
- <return type-id='type-id-7'/>
+ <function-decl name='zpool_get_features' mangled-name='zpool_get_features' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_features'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-19'/>
</function-decl>
- <function-decl name='remove_mountpoint' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-6'/>
+ <pointer-type-def type-id='type-id-19' size-in-bits='64' id='type-id-86'/>
+ <function-decl name='zpool_get_config' mangled-name='zpool_get_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_config'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-86' name='oldconfig'/>
+ <return type-id='type-id-19'/>
</function-decl>
- <function-decl name='zfs_share_smb' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-8'/>
+ <function-decl name='uu_avl_first' mangled-name='uu_avl_first' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_refresh_properties' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-6'/>
+ <function-decl name='make_dataset_handle' mangled-name='make_dataset_handle' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-85'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='ZPROP_CONT' value='-2'/>
- <enumerator name='ZPROP_INVAL' value='-1'/>
- <enumerator name='ZFS_PROP_TYPE' value='0'/>
- <enumerator name='ZFS_PROP_CREATION' value='1'/>
- <enumerator name='ZFS_PROP_USED' value='2'/>
- <enumerator name='ZFS_PROP_AVAILABLE' value='3'/>
- <enumerator name='ZFS_PROP_REFERENCED' value='4'/>
- <enumerator name='ZFS_PROP_COMPRESSRATIO' value='5'/>
- <enumerator name='ZFS_PROP_MOUNTED' value='6'/>
- <enumerator name='ZFS_PROP_ORIGIN' value='7'/>
- <enumerator name='ZFS_PROP_QUOTA' value='8'/>
- <enumerator name='ZFS_PROP_RESERVATION' value='9'/>
- <enumerator name='ZFS_PROP_VOLSIZE' value='10'/>
- <enumerator name='ZFS_PROP_VOLBLOCKSIZE' value='11'/>
- <enumerator name='ZFS_PROP_RECORDSIZE' value='12'/>
- <enumerator name='ZFS_PROP_MOUNTPOINT' value='13'/>
- <enumerator name='ZFS_PROP_SHARENFS' value='14'/>
- <enumerator name='ZFS_PROP_CHECKSUM' value='15'/>
- <enumerator name='ZFS_PROP_COMPRESSION' value='16'/>
- <enumerator name='ZFS_PROP_ATIME' value='17'/>
- <enumerator name='ZFS_PROP_DEVICES' value='18'/>
- <enumerator name='ZFS_PROP_EXEC' value='19'/>
- <enumerator name='ZFS_PROP_SETUID' value='20'/>
- <enumerator name='ZFS_PROP_READONLY' value='21'/>
- <enumerator name='ZFS_PROP_ZONED' value='22'/>
- <enumerator name='ZFS_PROP_SNAPDIR' value='23'/>
- <enumerator name='ZFS_PROP_ACLMODE' value='24'/>
- <enumerator name='ZFS_PROP_ACLINHERIT' value='25'/>
- <enumerator name='ZFS_PROP_CREATETXG' value='26'/>
- <enumerator name='ZFS_PROP_NAME' value='27'/>
- <enumerator name='ZFS_PROP_CANMOUNT' value='28'/>
- <enumerator name='ZFS_PROP_ISCSIOPTIONS' value='29'/>
- <enumerator name='ZFS_PROP_XATTR' value='30'/>
- <enumerator name='ZFS_PROP_NUMCLONES' value='31'/>
- <enumerator name='ZFS_PROP_COPIES' value='32'/>
- <enumerator name='ZFS_PROP_VERSION' value='33'/>
- <enumerator name='ZFS_PROP_UTF8ONLY' value='34'/>
- <enumerator name='ZFS_PROP_NORMALIZE' value='35'/>
- <enumerator name='ZFS_PROP_CASE' value='36'/>
- <enumerator name='ZFS_PROP_VSCAN' value='37'/>
- <enumerator name='ZFS_PROP_NBMAND' value='38'/>
- <enumerator name='ZFS_PROP_SHARESMB' value='39'/>
- <enumerator name='ZFS_PROP_REFQUOTA' value='40'/>
- <enumerator name='ZFS_PROP_REFRESERVATION' value='41'/>
- <enumerator name='ZFS_PROP_GUID' value='42'/>
- <enumerator name='ZFS_PROP_PRIMARYCACHE' value='43'/>
- <enumerator name='ZFS_PROP_SECONDARYCACHE' value='44'/>
- <enumerator name='ZFS_PROP_USEDSNAP' value='45'/>
- <enumerator name='ZFS_PROP_USEDDS' value='46'/>
- <enumerator name='ZFS_PROP_USEDCHILD' value='47'/>
- <enumerator name='ZFS_PROP_USEDREFRESERV' value='48'/>
- <enumerator name='ZFS_PROP_USERACCOUNTING' value='49'/>
- <enumerator name='ZFS_PROP_STMF_SHAREINFO' value='50'/>
- <enumerator name='ZFS_PROP_DEFER_DESTROY' value='51'/>
- <enumerator name='ZFS_PROP_USERREFS' value='52'/>
- <enumerator name='ZFS_PROP_LOGBIAS' value='53'/>
- <enumerator name='ZFS_PROP_UNIQUE' value='54'/>
- <enumerator name='ZFS_PROP_OBJSETID' value='55'/>
- <enumerator name='ZFS_PROP_DEDUP' value='56'/>
- <enumerator name='ZFS_PROP_MLSLABEL' value='57'/>
- <enumerator name='ZFS_PROP_SYNC' value='58'/>
- <enumerator name='ZFS_PROP_DNODESIZE' value='59'/>
- <enumerator name='ZFS_PROP_REFRATIO' value='60'/>
- <enumerator name='ZFS_PROP_WRITTEN' value='61'/>
- <enumerator name='ZFS_PROP_CLONES' value='62'/>
- <enumerator name='ZFS_PROP_LOGICALUSED' value='63'/>
- <enumerator name='ZFS_PROP_LOGICALREFERENCED' value='64'/>
- <enumerator name='ZFS_PROP_INCONSISTENT' value='65'/>
- <enumerator name='ZFS_PROP_VOLMODE' value='66'/>
- <enumerator name='ZFS_PROP_FILESYSTEM_LIMIT' value='67'/>
- <enumerator name='ZFS_PROP_SNAPSHOT_LIMIT' value='68'/>
- <enumerator name='ZFS_PROP_FILESYSTEM_COUNT' value='69'/>
- <enumerator name='ZFS_PROP_SNAPSHOT_COUNT' value='70'/>
- <enumerator name='ZFS_PROP_SNAPDEV' value='71'/>
- <enumerator name='ZFS_PROP_ACLTYPE' value='72'/>
- <enumerator name='ZFS_PROP_SELINUX_CONTEXT' value='73'/>
- <enumerator name='ZFS_PROP_SELINUX_FSCONTEXT' value='74'/>
- <enumerator name='ZFS_PROP_SELINUX_DEFCONTEXT' value='75'/>
- <enumerator name='ZFS_PROP_SELINUX_ROOTCONTEXT' value='76'/>
- <enumerator name='ZFS_PROP_RELATIME' value='77'/>
- <enumerator name='ZFS_PROP_REDUNDANT_METADATA' value='78'/>
- <enumerator name='ZFS_PROP_OVERLAY' value='79'/>
- <enumerator name='ZFS_PROP_PREV_SNAP' value='80'/>
- <enumerator name='ZFS_PROP_RECEIVE_RESUME_TOKEN' value='81'/>
- <enumerator name='ZFS_PROP_ENCRYPTION' value='82'/>
- <enumerator name='ZFS_PROP_KEYLOCATION' value='83'/>
- <enumerator name='ZFS_PROP_KEYFORMAT' value='84'/>
- <enumerator name='ZFS_PROP_PBKDF2_SALT' value='85'/>
- <enumerator name='ZFS_PROP_PBKDF2_ITERS' value='86'/>
- <enumerator name='ZFS_PROP_ENCRYPTION_ROOT' value='87'/>
- <enumerator name='ZFS_PROP_KEY_GUID' value='88'/>
- <enumerator name='ZFS_PROP_KEYSTATUS' value='89'/>
- <enumerator name='ZFS_PROP_REMAPTXG' value='90'/>
- <enumerator name='ZFS_PROP_SPECIAL_SMALL_BLOCKS' value='91'/>
- <enumerator name='ZFS_PROP_IVSET_GUID' value='92'/>
- <enumerator name='ZFS_PROP_REDACTED' value='93'/>
- <enumerator name='ZFS_PROP_REDACT_SNAPS' value='94'/>
- <enumerator name='ZFS_NUM_PROPS' value='95'/>
- </enum-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-86'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='ZPROP_SRC_NONE' value='1'/>
- <enumerator name='ZPROP_SRC_DEFAULT' value='2'/>
- <enumerator name='ZPROP_SRC_TEMPORARY' value='4'/>
- <enumerator name='ZPROP_SRC_LOCAL' value='8'/>
- <enumerator name='ZPROP_SRC_INHERITED' value='16'/>
- <enumerator name='ZPROP_SRC_RECEIVED' value='32'/>
- </enum-decl>
- <pointer-type-def type-id='type-id-86' size-in-bits='64' id='type-id-87'/>
- <function-decl name='zfs_prop_get' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-85'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-87'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-50'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_prop_get_int' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-35'/>
+ <function-decl name='uu_avl_next' mangled-name='uu_avl_next' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-17' size-in-bits='64' id='type-id-88'/>
- <function-decl name='zfs_is_mounted' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-88'/>
- <return type-id='type-id-50'/>
+ <function-decl name='zpool_open_silent' mangled-name='zpool_open_silent' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_mount' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='strchr' mangled-name='strchr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_unshare_nfs' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='getenv' mangled-name='getenv' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_share_nfs' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-8'/>
+ <function-decl name='__builtin___strcpy_chk' mangled-name='__strcpy_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_commit_nfs_shares' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='__builtin_memset' mangled-name='memset' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='strlcpy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-35'/>
+ <function-decl name='zcmd_alloc_dst_nvlist' mangled-name='zcmd_alloc_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='strlcat' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-35'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-89'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='PROTO_NFS' value='0'/>
- <enumerator name='PROTO_SMB' value='1'/>
- <enumerator name='PROTO_END' value='2'/>
- </enum-decl>
- <function-decl name='zfs_unshare_proto' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-87'/>
- <return type-id='type-id-8'/>
+ <function-decl name='__errno_location' mangled-name='__errno_location' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_commit_proto' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-87'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zcmd_expand_dst_nvlist' mangled-name='zcmd_expand_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='uu_avl_remove' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-4'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_ioctl' mangled-name='zfs_ioctl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_close' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zcmd_read_dst_nvlist' mangled-name='zcmd_read_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='uu_avl_destroy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-4'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zcmd_free_nvlists' mangled-name='zcmd_free_nvlists' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-45' size-in-bits='64' id='type-id-90'/>
- <function-decl name='uu_avl_pool_destroy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-90'/>
- <return type-id='type-id-6'/>
+ <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-19' size-in-bits='64' id='type-id-91'/>
- <function-decl name='zfs_alloc' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-7'/>
+ <function-decl name='nvlist_exists' mangled-name='nvlist_exists' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-92' size-in-bits='64' id='type-id-93'/>
- <function-decl name='uu_avl_pool_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-93'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-90'/>
- </function-decl>
- <function-decl name='uu_avl_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-90'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-4'/>
+ <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_error' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-94' size-in-bits='64' id='type-id-95'/>
- <function-decl name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-95'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-decl name='no_memory' mangled-name='no_memory' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_iter_mounted' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-95'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-95'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvpair_name' mangled-name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <qualified-type-def type-id='type-id-9' const='yes' id='type-id-96'/>
- <pointer-type-def type-id='type-id-96' size-in-bits='64' id='type-id-97'/>
- <function-decl name='zfs_get_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-97'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_strdup' mangled-name='zfs_strdup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_open' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-82'/>
+ <function-decl name='nvpair_value_nvlist' mangled-name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_is_shared' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-50'/>
+ <function-decl name='dcgettext' mangled-name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='uu_avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-98'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uan_opaque' type-id='type-id-99' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-66' size-in-bits='192' id='type-id-99'>
- <subrange length='3' type-id='type-id-33' id='type-id-100'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-98' size-in-bits='64' id='type-id-101'/>
- <function-decl name='uu_avl_node_init' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-101'/>
- <parameter type-id='type-id-90'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_standard_error' mangled-name='zfs_standard_error' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-35' size-in-bits='64' id='type-id-102'/>
- <function-decl name='uu_avl_find' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-4'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-102'/>
- <return type-id='type-id-7'/>
+ <function-decl name='uu_avl_teardown' mangled-name='uu_avl_teardown' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='uu_avl_insert' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-4'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-6'/>
+ <function-decl name='nvlist_next_nvpair' mangled-name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_handle' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-91'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-68'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-type size-in-bits='64' id='type-id-60'>
+ <parameter type-id='type-id-13'/>
+ <parameter type-id='type-id-13'/>
+ <return type-id='type-id-2'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-92'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-type size-in-bits='64' id='type-id-77'>
+ <parameter type-id='type-id-76'/>
+ <parameter type-id='type-id-13'/>
+ <return type-id='type-id-2'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-94'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-type size-in-bits='64' id='type-id-80'>
+ <parameter type-id='type-id-4'/>
+ <parameter type-id='type-id-13'/>
+ <return type-id='type-id-2'/>
</function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_config.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zpool_skip_pool' mangled-name='zpool_skip_pool' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_skip_pool'>
- <parameter type-id='type-id-84' name='poolname'/>
- <return type-id='type-id-16'/>
+ <abi-instr version='1.0' address-size='64' path='libzfs_crypto.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='zfs_crypto_rewrap' mangled-name='zfs_crypto_rewrap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_rewrap'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-19' name='raw_props'/>
+ <parameter type-id='type-id-9' name='inheritkey'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <pointer-type-def type-id='type-id-7' size-in-bits='64' id='type-id-103'/>
- <function-decl name='uu_avl_teardown' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-4'/>
- <parameter type-id='type-id-103'/>
+ <function-decl name='zfs_crypto_unload_key' mangled-name='zfs_crypto_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_unload_key'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_crypto_load_key' mangled-name='zfs_crypto_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_load_key'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-9' name='noop'/>
+ <parameter type-id='type-id-14' name='alt_keylocation'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_crypto_attempt_load_keys' mangled-name='zfs_crypto_attempt_load_keys' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_attempt_load_keys'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-14' name='fsname'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_crypto_clone_check' mangled-name='zfs_crypto_clone_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_clone_check'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-76' name='origin_zhp'/>
+ <parameter type-id='type-id-14' name='parent_name'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-68' size-in-bits='64' id='type-id-87'/>
+ <typedef-decl name='uint_t' type-id='type-id-30' id='type-id-88'/>
+ <pointer-type-def type-id='type-id-88' size-in-bits='64' id='type-id-89'/>
+ <function-decl name='zfs_crypto_create' mangled-name='zfs_crypto_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_create'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-14' name='parent_name'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <parameter type-id='type-id-19' name='pool_props'/>
+ <parameter type-id='type-id-9' name='stdin_available'/>
+ <parameter type-id='type-id-87' name='wkeydata_out'/>
+ <parameter type-id='type-id-89' name='wkeylen_out'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_crypto_get_encryption_root' mangled-name='zfs_crypto_get_encryption_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_get_encryption_root'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-85' name='is_encroot'/>
+ <parameter type-id='type-id-14' name='buf'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='__builtin___snprintf_chk' mangled-name='__snprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_name_to_prop' mangled-name='zfs_name_to_prop' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_error_aux' mangled-name='zfs_error_aux' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fnvlist_alloc' mangled-name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_valid_proplist' mangled-name='zfs_valid_proplist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_parent_name' mangled-name='zfs_parent_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_change_key' mangled-name='lzc_change_key' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_prop_to_name' mangled-name='zfs_prop_to_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_unload_key' mangled-name='lzc_unload_key' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_load_key' mangled-name='lzc_load_key' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__printf_chk' mangled-name='__printf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_handle_dup' mangled-name='zfs_handle_dup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__builtin_memcpy' mangled-name='memcpy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='calloc' mangled-name='calloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='regexec' mangled-name='regexec' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fileno' mangled-name='fileno' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='isatty' mangled-name='isatty' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__builtin_putchar' mangled-name='putchar' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__getdelim' mangled-name='__getdelim' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='sigemptyset' mangled-name='sigemptyset' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='sigaction' mangled-name='sigaction' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fputc' mangled-name='fputc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fflush' mangled-name='fflush' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='tcgetattr' mangled-name='tcgetattr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='tcsetattr' mangled-name='tcsetattr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='getpid' mangled-name='getpid' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='kill' mangled-name='kill' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__ctype_b_loc' mangled-name='__ctype_b_loc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zpool_get_features' mangled-name='zpool_get_features' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zpool_get_prop_int' mangled-name='zpool_get_prop_int' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__fread_alias' mangled-name='fread' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='malloc' mangled-name='malloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='ferror' mangled-name='ferror' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fopen' mangled-name='fopen64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fclose' mangled-name='fclose' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__builtin_memmove' mangled-name='memmove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='sscanf' mangled-name='sscanf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='PKCS5_PBKDF2_HMAC_SHA1' mangled-name='PKCS5_PBKDF2_HMAC_SHA1' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__open_alias' mangled-name='open64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__read_alias' mangled-name='read' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='close' mangled-name='close' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__builtin_strcpy' mangled-name='strcpy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ </abi-instr>
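The libzfs_crypto.c instrumentation above now records zfs_crypto_load_key(zhp, noop, alt_keylocation) and zfs_crypto_unload_key(zhp) among the exported symbols. A minimal sketch of exercising that pair through the declared signatures, assuming the usual libzfs_init()/zfs_open() handle lifecycle and with detailed error reporting elided:

#include <libzfs.h>

/* Hedged sketch: load, then unload, the encryption key for one dataset. */
static int
load_and_unload_key(const char *dsname)
{
	libzfs_handle_t *hdl = libzfs_init();
	if (hdl == NULL)
		return (1);

	zfs_handle_t *zhp = zfs_open(hdl, dsname, ZFS_TYPE_FILESYSTEM);
	if (zhp == NULL) {
		libzfs_fini(hdl);
		return (1);
	}

	/* noop = B_FALSE, alt_keylocation = NULL: use the dataset's own keylocation. */
	int err = zfs_crypto_load_key(zhp, B_FALSE, NULL);
	if (err == 0)
		err = zfs_crypto_unload_key(zhp);

	zfs_close(zhp);
	libzfs_fini(hdl);
	return (err);
}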
+ <abi-instr version='1.0' address-size='64' path='libzfs_dataset.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-90'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
+ <enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
+ </enum-decl>
+ <typedef-decl name='zfs_wait_activity_t' type-id='type-id-90' id='type-id-91'/>
+ <function-decl name='zfs_wait_status' mangled-name='zfs_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_wait_status'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-91' name='activity'/>
+ <parameter type-id='type-id-85' name='missing'/>
+ <parameter type-id='type-id-85' name='waited'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zvol_volsize_to_reservation' mangled-name='zvol_volsize_to_reservation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zvol_volsize_to_reservation'>
+ <parameter type-id='type-id-4' name='zph'/>
+ <parameter type-id='type-id-7' name='volsize'/>
+ <parameter type-id='type-id-19' name='props'/>
<return type-id='type-id-7'/>
</function-decl>
- <pointer-type-def type-id='type-id-36' size-in-bits='64' id='type-id-104'/>
- <function-decl name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_get_holds' mangled-name='zfs_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_holds'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-86' name='nvl'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <pointer-type-def type-id='type-id-15' size-in-bits='64' id='type-id-105'/>
- <function-decl name='zpool_get_config' mangled-name='zpool_get_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_config'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-105' name='oldconfig'/>
- <return type-id='type-id-15'/>
+ <function-decl name='zfs_set_fsacl' mangled-name='zfs_set_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_set_fsacl'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-9' name='un'/>
+ <parameter type-id='type-id-19' name='nvl'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_get_features' mangled-name='zpool_get_features' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_features'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-15'/>
+ <function-decl name='zfs_get_fsacl' mangled-name='zfs_get_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_fsacl'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-86' name='nvl'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='nvlist_exists' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
+ <function-decl name='zfs_release' mangled-name='zfs_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_release'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='snapname'/>
+ <parameter type-id='type-id-84' name='tag'/>
+ <parameter type-id='type-id-9' name='recursive'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <pointer-type-def type-id='type-id-16' size-in-bits='64' id='type-id-106'/>
- <function-decl name='zpool_refresh_stats' mangled-name='zpool_refresh_stats' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_refresh_stats'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-106' name='missing'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_hold_nvl' mangled-name='zfs_hold_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold_nvl'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-2' name='cleanup_fd'/>
+ <parameter type-id='type-id-19' name='holds'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <pointer-type-def type-id='type-id-104' size-in-bits='64' id='type-id-107'/>
- <function-decl name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_hold' mangled-name='zfs_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='snapname'/>
+ <parameter type-id='type-id-84' name='tag'/>
+ <parameter type-id='type-id-9' name='recursive'/>
+ <parameter type-id='type-id-2' name='cleanup_fd'/>
+ <return type-id='type-id-2'/>
</function-decl>
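zfs_hold and zfs_release are recorded here with explicit snapname/tag/recursive parameters, zfs_hold additionally taking a cleanup_fd. A hedged sketch pairing the two, assuming fs_zhp is the parent filesystem handle and snapname is the portion after '@', as in zfs(8) hold/release:

/* Hedged sketch: place and then release a user hold on one snapshot. */
static int
hold_then_release(zfs_handle_t *fs_zhp, const char *snapname, const char *tag)
{
	/* cleanup_fd = -1: no cleanup-on-exit descriptor, so the hold persists. */
	int err = zfs_hold(fs_zhp, snapname, tag, B_FALSE, -1);

	if (err == 0)
		err = zfs_release(fs_zhp, snapname, tag, B_FALSE);

	return (err);
}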
- <class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='type-id-108'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zc_name' type-id='type-id-109' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32768'>
- <var-decl name='zc_nvlist_src' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32832'>
- <var-decl name='zc_nvlist_src_size' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32896'>
- <var-decl name='zc_nvlist_dst' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32960'>
- <var-decl name='zc_nvlist_dst_size' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33024'>
- <var-decl name='zc_nvlist_dst_filled' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33056'>
- <var-decl name='zc_pad2' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33088'>
- <var-decl name='zc_history' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33152'>
- <var-decl name='zc_value' type-id='type-id-110' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='98688'>
- <var-decl name='zc_string' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100736'>
- <var-decl name='zc_guid' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100800'>
- <var-decl name='zc_nvlist_conf' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100864'>
- <var-decl name='zc_nvlist_conf_size' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100928'>
- <var-decl name='zc_cookie' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100992'>
- <var-decl name='zc_objset_type' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101056'>
- <var-decl name='zc_perm_action' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101120'>
- <var-decl name='zc_history_len' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101184'>
- <var-decl name='zc_history_offset' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101248'>
- <var-decl name='zc_obj' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101312'>
- <var-decl name='zc_iflags' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101376'>
- <var-decl name='zc_share' type-id='type-id-111' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101632'>
- <var-decl name='zc_objset_stats' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='103936'>
- <var-decl name='zc_begin_record' type-id='type-id-112' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='106368'>
- <var-decl name='zc_inject_record' type-id='type-id-113' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109184'>
- <var-decl name='zc_defer_destroy' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109216'>
- <var-decl name='zc_flags' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109248'>
- <var-decl name='zc_action_handle' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109312'>
- <var-decl name='zc_cleanup_fd' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109344'>
- <var-decl name='zc_simple' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109352'>
- <var-decl name='zc_pad' type-id='type-id-114' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109376'>
- <var-decl name='zc_sendobj' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109440'>
- <var-decl name='zc_fromobj' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109504'>
- <var-decl name='zc_createtxg' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109568'>
- <var-decl name='zc_stat' type-id='type-id-115' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109888'>
- <var-decl name='zc_zoneid' type-id='type-id-22' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='32768' id='type-id-109'>
- <subrange length='4096' type-id='type-id-33' id='type-id-116'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='65536' id='type-id-110'>
- <subrange length='8192' type-id='type-id-33' id='type-id-117'/>
-
- </array-type-def>
- <class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-118'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='z_exportdata' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='z_sharedata' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='z_sharetype' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='z_sharemax' type-id='type-id-22' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='zfs_share_t' type-id='type-id-118' id='type-id-111'/>
- <class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='type-id-112'>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-92'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='ZFS_PROP_USERUSED' value='0'/>
+ <enumerator name='ZFS_PROP_USERQUOTA' value='1'/>
+ <enumerator name='ZFS_PROP_GROUPUSED' value='2'/>
+ <enumerator name='ZFS_PROP_GROUPQUOTA' value='3'/>
+ <enumerator name='ZFS_PROP_USEROBJUSED' value='4'/>
+ <enumerator name='ZFS_PROP_USEROBJQUOTA' value='5'/>
+ <enumerator name='ZFS_PROP_GROUPOBJUSED' value='6'/>
+ <enumerator name='ZFS_PROP_GROUPOBJQUOTA' value='7'/>
+ <enumerator name='ZFS_PROP_PROJECTUSED' value='8'/>
+ <enumerator name='ZFS_PROP_PROJECTQUOTA' value='9'/>
+ <enumerator name='ZFS_PROP_PROJECTOBJUSED' value='10'/>
+ <enumerator name='ZFS_PROP_PROJECTOBJQUOTA' value='11'/>
+ <enumerator name='ZFS_NUM_USERQUOTA_PROPS' value='12'/>
+ </enum-decl>
+ <typedef-decl name='zfs_userquota_prop_t' type-id='type-id-92' id='type-id-93'/>
+ <typedef-decl name='__uid_t' type-id='type-id-30' id='type-id-94'/>
+ <typedef-decl name='uid_t' type-id='type-id-94' id='type-id-95'/>
+ <pointer-type-def type-id='type-id-96' size-in-bits='64' id='type-id-97'/>
+ <typedef-decl name='zfs_userspace_cb_t' type-id='type-id-97' id='type-id-98'/>
+ <function-decl name='zfs_userspace' mangled-name='zfs_userspace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_userspace'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-93' name='type'/>
+ <parameter type-id='type-id-98' name='func'/>
+ <parameter type-id='type-id-13' name='arg'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_smb_acl_rename' mangled-name='zfs_smb_acl_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_rename'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-14' name='dataset'/>
+ <parameter type-id='type-id-14' name='path'/>
+ <parameter type-id='type-id-14' name='oldname'/>
+ <parameter type-id='type-id-14' name='newname'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_smb_acl_purge' mangled-name='zfs_smb_acl_purge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_purge'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-14' name='dataset'/>
+ <parameter type-id='type-id-14' name='path'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_smb_acl_remove' mangled-name='zfs_smb_acl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_remove'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-14' name='dataset'/>
+ <parameter type-id='type-id-14' name='path'/>
+ <parameter type-id='type-id-14' name='resource'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_smb_acl_add' mangled-name='zfs_smb_acl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_add'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-14' name='dataset'/>
+ <parameter type-id='type-id-14' name='path'/>
+ <parameter type-id='type-id-14' name='resource'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_prune_proplist' mangled-name='zfs_prune_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prune_proplist'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-68' name='props'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <class-decl name='zprop_list' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-99'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_magic' type-id='type-id-22' visibility='default'/>
+ <var-decl name='pl_prop' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_versioninfo' type-id='type-id-22' visibility='default'/>
+ <var-decl name='pl_user_prop' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_creation_time' type-id='type-id-22' visibility='default'/>
+ <var-decl name='pl_next' type-id='type-id-100' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_type' type-id='type-id-78' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='drr_flags' type-id='type-id-38' visibility='default'/>
+ <var-decl name='pl_all' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
+ <var-decl name='pl_width' type-id='type-id-18' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_fromguid' type-id='type-id-22' visibility='default'/>
+ <var-decl name='pl_recvd_width' type-id='type-id-18' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_toname' type-id='type-id-12' visibility='default'/>
+ <var-decl name='pl_fixed' type-id='type-id-9' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='type-id-119'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zi_objset' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zi_object' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zi_start' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='zi_end' type-id='type-id-22' visibility='default'/>
+ <pointer-type-def type-id='type-id-99' size-in-bits='64' id='type-id-100'/>
+ <typedef-decl name='zprop_list_t' type-id='type-id-99' id='type-id-101'/>
+ <pointer-type-def type-id='type-id-101' size-in-bits='64' id='type-id-102'/>
+ <pointer-type-def type-id='type-id-102' size-in-bits='64' id='type-id-103'/>
+ <function-decl name='zfs_expand_proplist' mangled-name='zfs_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_expand_proplist'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-103' name='plp'/>
+ <parameter type-id='type-id-9' name='received'/>
+ <parameter type-id='type-id-9' name='literal'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_get_user_props' mangled-name='zfs_get_user_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_user_props'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-19'/>
+ </function-decl>
+ <function-decl name='zfs_get_recvd_props' mangled-name='zfs_get_recvd_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_recvd_props'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-19'/>
+ </function-decl>
+ <function-decl name='zfs_get_all_props' mangled-name='zfs_get_all_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_all_props'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-19'/>
+ </function-decl>
+ <class-decl name='renameflags' size-in-bits='32' is-struct='yes' visibility='default' id='type-id-104'>
+ <data-member access='public' layout-offset-in-bits='31'>
+ <var-decl name='recursive' type-id='type-id-2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='zi_guid' type-id='type-id-22' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='30'>
+ <var-decl name='nounmount' type-id='type-id-2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='zi_level' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='zi_error' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='zi_type' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='zi_freq' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='zi_failfast' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='zi_func' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2560'>
- <var-decl name='zi_iotype' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2592'>
- <var-decl name='zi_duration' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2624'>
- <var-decl name='zi_timer' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2688'>
- <var-decl name='zi_nlanes' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2752'>
- <var-decl name='zi_cmd' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2784'>
- <var-decl name='zi_dvas' type-id='type-id-38' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='29'>
+ <var-decl name='forceunmount' type-id='type-id-2' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='zinject_record_t' type-id='type-id-119' id='type-id-113'/>
-
- <array-type-def dimensions='1' type-id='type-id-79' size-in-bits='24' id='type-id-114'>
- <subrange length='3' type-id='type-id-33' id='type-id-100'/>
-
- </array-type-def>
- <class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-120'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zs_gen' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zs_mode' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zs_links' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='zs_ctime' type-id='type-id-121' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-22' size-in-bits='128' id='type-id-121'>
- <subrange length='2' type-id='type-id-33' id='type-id-67'/>
-
- </array-type-def>
- <typedef-decl name='zfs_stat_t' type-id='type-id-120' id='type-id-115'/>
- <pointer-type-def type-id='type-id-108' size-in-bits='64' id='type-id-122'/>
- <function-decl name='zcmd_alloc_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-122'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_ioctl' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-122'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zcmd_expand_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-122'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zcmd_free_nvlists' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-122'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='zcmd_read_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-122'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='getenv' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-17'/>
+ <typedef-decl name='renameflags_t' type-id='type-id-104' id='type-id-105'/>
+ <function-decl name='zfs_rename' mangled-name='zfs_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rename'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='target'/>
+ <parameter type-id='type-id-105' name='flags'/>
+ <return type-id='type-id-2'/>
</function-decl>
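The renameflags struct recorded just above exposes recursive/nounmount/forceunmount bitfields, and zfs_rename takes it by value alongside the target name. A hedged sketch using exactly those declared fields, assuming zhp was obtained via zfs_open() and eliding error reporting:

/* Hedged sketch: rename via the declared renameflags_t bitfields. */
static int
rename_with_flags(zfs_handle_t *zhp, const char *target)
{
	renameflags_t flags = { 0 };

	flags.recursive = 1;	/* recursive rename, as used for snapshots */
	flags.nounmount = 0;	/* allow the usual unmount/remount cycle */
	flags.forceunmount = 0;	/* do not force-unmount busy filesystems */

	return (zfs_rename(zhp, target, flags));
}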
- <pointer-type-def type-id='type-id-123' size-in-bits='64' id='type-id-124'/>
- <typedef-decl name='zpool_iter_f' type-id='type-id-124' id='type-id-125'/>
- <function-decl name='zpool_iter' mangled-name='zpool_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_iter'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-125' name='func'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_rollback' mangled-name='zfs_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rollback'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-76' name='snap'/>
+ <parameter type-id='type-id-9' name='force'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='uu_avl_first' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-4'/>
- <return type-id='type-id-7'/>
+ <function-decl name='zfs_snapshot' mangled-name='zfs_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-9' name='recursive'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='uu_avl_next' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-4'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-7'/>
+ <function-decl name='zfs_snapshot_nvl' mangled-name='zfs_snapshot_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot_nvl'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-19' name='snaps'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <pointer-type-def type-id='type-id-27' size-in-bits='64' id='type-id-126'/>
- <pointer-type-def type-id='type-id-126' size-in-bits='64' id='type-id-127'/>
- <function-decl name='zpool_open_silent' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-127'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_promote' mangled-name='zfs_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_promote'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='no_memory' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_clone' mangled-name='zfs_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_clone'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='target'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <class-decl name='nvpair' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-128'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvp_size' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='nvp_name_sz' type-id='type-id-129' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='48'>
- <var-decl name='nvp_reserve' type-id='type-id-129' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvp_value_elem' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='nvp_type' type-id='type-id-130' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__int16_t' type-id='type-id-55' id='type-id-131'/>
- <typedef-decl name='int16_t' type-id='type-id-131' id='type-id-129'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-132'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='DATA_TYPE_DONTCARE' value='-1'/>
- <enumerator name='DATA_TYPE_UNKNOWN' value='0'/>
- <enumerator name='DATA_TYPE_BOOLEAN' value='1'/>
- <enumerator name='DATA_TYPE_BYTE' value='2'/>
- <enumerator name='DATA_TYPE_INT16' value='3'/>
- <enumerator name='DATA_TYPE_UINT16' value='4'/>
- <enumerator name='DATA_TYPE_INT32' value='5'/>
- <enumerator name='DATA_TYPE_UINT32' value='6'/>
- <enumerator name='DATA_TYPE_INT64' value='7'/>
- <enumerator name='DATA_TYPE_UINT64' value='8'/>
- <enumerator name='DATA_TYPE_STRING' value='9'/>
- <enumerator name='DATA_TYPE_BYTE_ARRAY' value='10'/>
- <enumerator name='DATA_TYPE_INT16_ARRAY' value='11'/>
- <enumerator name='DATA_TYPE_UINT16_ARRAY' value='12'/>
- <enumerator name='DATA_TYPE_INT32_ARRAY' value='13'/>
- <enumerator name='DATA_TYPE_UINT32_ARRAY' value='14'/>
- <enumerator name='DATA_TYPE_INT64_ARRAY' value='15'/>
- <enumerator name='DATA_TYPE_UINT64_ARRAY' value='16'/>
- <enumerator name='DATA_TYPE_STRING_ARRAY' value='17'/>
- <enumerator name='DATA_TYPE_HRTIME' value='18'/>
- <enumerator name='DATA_TYPE_NVLIST' value='19'/>
- <enumerator name='DATA_TYPE_NVLIST_ARRAY' value='20'/>
- <enumerator name='DATA_TYPE_BOOLEAN_VALUE' value='21'/>
- <enumerator name='DATA_TYPE_INT8' value='22'/>
- <enumerator name='DATA_TYPE_UINT8' value='23'/>
- <enumerator name='DATA_TYPE_BOOLEAN_ARRAY' value='24'/>
- <enumerator name='DATA_TYPE_INT8_ARRAY' value='25'/>
- <enumerator name='DATA_TYPE_UINT8_ARRAY' value='26'/>
- <enumerator name='DATA_TYPE_DOUBLE' value='27'/>
- </enum-decl>
- <typedef-decl name='data_type_t' type-id='type-id-132' id='type-id-130'/>
- <pointer-type-def type-id='type-id-128' size-in-bits='64' id='type-id-133'/>
- <function-decl name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-133'/>
- <return type-id='type-id-133'/>
- </function-decl>
- <function-decl name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='zfs_strdup' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-17'/>
+ <function-decl name='zfs_destroy_snaps_nvl' mangled-name='zfs_destroy_snaps_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps_nvl'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-19' name='snaps'/>
+ <parameter type-id='type-id-9' name='defer'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_destroy_snaps' mangled-name='zfs_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-14' name='snapname'/>
+ <parameter type-id='type-id-9' name='defer'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_destroy' mangled-name='zfs_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-9' name='defer'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-107'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_create' mangled-name='zfs_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-17'/>
+ <function-decl name='zfs_create_ancestors' mangled-name='zfs_create_ancestors' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create_ancestors'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_standard_error' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_parent_name' mangled-name='zfs_parent_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parent_name'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-14' name='buf'/>
+ <parameter type-id='type-id-18' name='buflen'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <typedef-decl name='zfs_handle_t' type-id='type-id-9' id='type-id-134'/>
- <pointer-type-def type-id='type-id-134' size-in-bits='64' id='type-id-135'/>
- <pointer-type-def type-id='type-id-136' size-in-bits='64' id='type-id-137'/>
- <typedef-decl name='zfs_iter_f' type-id='type-id-137' id='type-id-138'/>
- <function-decl name='zfs_iter_root' mangled-name='zfs_iter_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_root'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-138' name='func'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <qualified-type-def type-id='type-id-75' const='yes' id='type-id-106'/>
+ <pointer-type-def type-id='type-id-106' size-in-bits='64' id='type-id-107'/>
+ <function-decl name='zfs_get_underlying_type' mangled-name='zfs_get_underlying_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_type'>
+ <parameter type-id='type-id-107' name='zhp'/>
+ <return type-id='type-id-66'/>
</function-decl>
- <function-decl name='make_dataset_handle' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-82'/>
+ <function-decl name='zfs_get_type' mangled-name='zfs_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_type'>
+ <parameter type-id='type-id-107' name='zhp'/>
+ <return type-id='type-id-66'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-136'>
- <parameter type-id='type-id-135'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-123'>
- <parameter type-id='type-id-11'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_crypto.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zfs_crypto_get_encryption_root' mangled-name='zfs_crypto_get_encryption_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_get_encryption_root'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-106' name='is_encroot'/>
- <parameter type-id='type-id-17' name='buf'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <pointer-type-def type-id='type-id-18' size-in-bits='64' id='type-id-139'/>
- <typedef-decl name='uint_t' type-id='type-id-5' id='type-id-140'/>
- <pointer-type-def type-id='type-id-140' size-in-bits='64' id='type-id-141'/>
- <function-decl name='zfs_crypto_create' mangled-name='zfs_crypto_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_create'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-17' name='parent_name'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-15' name='pool_props'/>
- <parameter type-id='type-id-16' name='stdin_available'/>
- <parameter type-id='type-id-139' name='wkeydata_out'/>
- <parameter type-id='type-id-141' name='wkeylen_out'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_prop_to_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-85'/>
+ <function-decl name='zfs_get_pool_name' mangled-name='zfs_get_pool_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_name'>
+ <parameter type-id='type-id-107' name='zhp'/>
<return type-id='type-id-84'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-102'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-88'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-142'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='ZPOOL_PROP_INVAL' value='-1'/>
- <enumerator name='ZPOOL_PROP_NAME' value='0'/>
- <enumerator name='ZPOOL_PROP_SIZE' value='1'/>
- <enumerator name='ZPOOL_PROP_CAPACITY' value='2'/>
- <enumerator name='ZPOOL_PROP_ALTROOT' value='3'/>
- <enumerator name='ZPOOL_PROP_HEALTH' value='4'/>
- <enumerator name='ZPOOL_PROP_GUID' value='5'/>
- <enumerator name='ZPOOL_PROP_VERSION' value='6'/>
- <enumerator name='ZPOOL_PROP_BOOTFS' value='7'/>
- <enumerator name='ZPOOL_PROP_DELEGATION' value='8'/>
- <enumerator name='ZPOOL_PROP_AUTOREPLACE' value='9'/>
- <enumerator name='ZPOOL_PROP_CACHEFILE' value='10'/>
- <enumerator name='ZPOOL_PROP_FAILUREMODE' value='11'/>
- <enumerator name='ZPOOL_PROP_LISTSNAPS' value='12'/>
- <enumerator name='ZPOOL_PROP_AUTOEXPAND' value='13'/>
- <enumerator name='ZPOOL_PROP_DEDUPDITTO' value='14'/>
- <enumerator name='ZPOOL_PROP_DEDUPRATIO' value='15'/>
- <enumerator name='ZPOOL_PROP_FREE' value='16'/>
- <enumerator name='ZPOOL_PROP_ALLOCATED' value='17'/>
- <enumerator name='ZPOOL_PROP_READONLY' value='18'/>
- <enumerator name='ZPOOL_PROP_ASHIFT' value='19'/>
- <enumerator name='ZPOOL_PROP_COMMENT' value='20'/>
- <enumerator name='ZPOOL_PROP_EXPANDSZ' value='21'/>
- <enumerator name='ZPOOL_PROP_FREEING' value='22'/>
- <enumerator name='ZPOOL_PROP_FRAGMENTATION' value='23'/>
- <enumerator name='ZPOOL_PROP_LEAKED' value='24'/>
- <enumerator name='ZPOOL_PROP_MAXBLOCKSIZE' value='25'/>
- <enumerator name='ZPOOL_PROP_TNAME' value='26'/>
- <enumerator name='ZPOOL_PROP_MAXDNODESIZE' value='27'/>
- <enumerator name='ZPOOL_PROP_MULTIHOST' value='28'/>
- <enumerator name='ZPOOL_PROP_CHECKPOINT' value='29'/>
- <enumerator name='ZPOOL_PROP_LOAD_GUID' value='30'/>
- <enumerator name='ZPOOL_PROP_AUTOTRIM' value='31'/>
- <enumerator name='ZPOOL_PROP_COMPATIBILITY' value='32'/>
- <enumerator name='ZPOOL_NUM_PROPS' value='33'/>
- </enum-decl>
- <function-decl name='zpool_get_prop_int' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-142'/>
- <parameter type-id='type-id-87'/>
- <return type-id='type-id-35'/>
- </function-decl>
- <function-decl name='zpool_get_features' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <return type-id='type-id-104'/>
- </function-decl>
- <function-decl name='zfs_error_aux' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='open' mangled-name='open64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='read' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-54'/>
- </function-decl>
- <function-decl name='close' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_crypto_clone_check' mangled-name='zfs_crypto_clone_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_clone_check'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-135' name='origin_zhp'/>
- <parameter type-id='type-id-17' name='parent_name'/>
- <parameter type-id='type-id-15' name='props'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_crypto_attempt_load_keys' mangled-name='zfs_crypto_attempt_load_keys' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_attempt_load_keys'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-17' name='fsname'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_handle_dup' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-82'/>
- </function-decl>
- <function-decl name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-95'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_crypto_load_key' mangled-name='zfs_crypto_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_load_key'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-16' name='noop'/>
- <parameter type-id='type-id-17' name='alt_keylocation'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='lzc_load_key' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <qualified-type-def type-id='type-id-69' const='yes' id='type-id-143'/>
- <pointer-type-def type-id='type-id-143' size-in-bits='64' id='type-id-144'/>
- <class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-145'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='rm_so' type-id='type-id-146' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='rm_eo' type-id='type-id-146' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='regoff_t' type-id='type-id-8' id='type-id-146'/>
- <pointer-type-def type-id='type-id-145' size-in-bits='64' id='type-id-147'/>
- <function-decl name='regexec' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-144'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-147'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-148'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_flags' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_IO_read_ptr' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_IO_read_end' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='_IO_read_base' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='_IO_write_base' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='_IO_write_ptr' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='_IO_write_end' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='_IO_buf_base' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='_IO_buf_end' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='_IO_save_base' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='_IO_backup_base' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='_IO_save_end' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='_markers' type-id='type-id-149' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='_chain' type-id='type-id-150' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='_fileno' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='_flags2' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='_old_offset' type-id='type-id-151' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='_cur_column' type-id='type-id-152' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1040'>
- <var-decl name='_vtable_offset' type-id='type-id-153' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1048'>
- <var-decl name='_shortbuf' type-id='type-id-154' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='_offset' type-id='type-id-155' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='_codecvt' type-id='type-id-156' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='_wide_data' type-id='type-id-157' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1344'>
- <var-decl name='_freeres_list' type-id='type-id-150' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='_freeres_buf' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1472'>
- <var-decl name='__pad5' type-id='type-id-28' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1536'>
- <var-decl name='_mode' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1568'>
- <var-decl name='_unused2' type-id='type-id-158' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-159'/>
- <pointer-type-def type-id='type-id-159' size-in-bits='64' id='type-id-149'/>
- <pointer-type-def type-id='type-id-148' size-in-bits='64' id='type-id-150'/>
- <typedef-decl name='__off_t' type-id='type-id-54' id='type-id-151'/>
- <type-decl name='unsigned short int' size-in-bits='16' id='type-id-152'/>
- <type-decl name='signed char' size-in-bits='8' id='type-id-153'/>
-
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='8' id='type-id-154'>
- <subrange length='1' type-id='type-id-33' id='type-id-160'/>
-
- </array-type-def>
- <typedef-decl name='__off64_t' type-id='type-id-54' id='type-id-155'/>
- <class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-161'/>
- <pointer-type-def type-id='type-id-161' size-in-bits='64' id='type-id-156'/>
- <class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-162'/>
- <pointer-type-def type-id='type-id-162' size-in-bits='64' id='type-id-157'/>
-
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='160' id='type-id-158'>
- <subrange length='20' type-id='type-id-33' id='type-id-163'/>
-
- </array-type-def>
- <function-decl name='fileno' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-150'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='isatty' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <qualified-type-def type-id='type-id-75' const='yes' id='type-id-164'/>
- <pointer-type-def type-id='type-id-164' size-in-bits='64' id='type-id-165'/>
- <function-decl name='PKCS5_PBKDF2_HMAC_SHA1' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-165'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-73'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_crypto_unload_key' mangled-name='zfs_crypto_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_unload_key'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='lzc_unload_key' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_crypto_rewrap' mangled-name='zfs_crypto_rewrap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_rewrap'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-15' name='raw_props'/>
- <parameter type-id='type-id-16' name='inheritkey'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_parent_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-104'/>
- </function-decl>
- <function-decl name='zfs_name_to_prop' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-85'/>
+ <function-decl name='zfs_get_name' mangled-name='zfs_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_name'>
+ <parameter type-id='type-id-107' name='zhp'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_valid_proplist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-76'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-104'/>
+ <function-decl name='zfs_prop_get_written' mangled-name='zfs_prop_get_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-14' name='propbuf'/>
+ <parameter type-id='type-id-2' name='proplen'/>
+ <parameter type-id='type-id-9' name='literal'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='lzc_change_key' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-8'/>
+ <pointer-type-def type-id='type-id-7' size-in-bits='64' id='type-id-108'/>
+ <function-decl name='zfs_prop_get_written_int' mangled-name='zfs_prop_get_written_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written_int'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-108' name='propvalue'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='ferror' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-150'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_get_userquota' mangled-name='zfs_prop_get_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-14' name='propbuf'/>
+ <parameter type-id='type-id-2' name='proplen'/>
+ <parameter type-id='type-id-9' name='literal'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='1024' is-struct='yes' is-anonymous='yes' naming-typedef-id='type-id-166' visibility='default' id='type-id-167'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__val' type-id='type-id-168' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-35' size-in-bits='1024' id='type-id-168'>
- <subrange length='16' type-id='type-id-33' id='type-id-169'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-167' size-in-bits='64' id='type-id-170'/>
- <function-decl name='sigemptyset' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-170'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_get_userquota_int' mangled-name='zfs_prop_get_userquota_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota_int'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-108' name='propvalue'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <class-decl name='sigaction' size-in-bits='1216' is-struct='yes' visibility='default' id='type-id-171'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__sigaction_handler' type-id='type-id-172' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='sa_mask' type-id='type-id-166' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1088'>
- <var-decl name='sa_flags' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='sa_restorer' type-id='type-id-173' visibility='default'/>
- </data-member>
- </class-decl>
- <union-decl name='__anonymous_union__' size-in-bits='64' is-anonymous='yes' visibility='default' id='type-id-172'>
- <data-member access='private'>
- <var-decl name='sa_handler' type-id='type-id-174' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='sa_sigaction' type-id='type-id-175' visibility='default'/>
- </data-member>
- </union-decl>
- <pointer-type-def type-id='type-id-176' size-in-bits='64' id='type-id-177'/>
- <typedef-decl name='__sighandler_t' type-id='type-id-177' id='type-id-174'/>
- <class-decl name='__anonymous_struct__' size-in-bits='1024' is-struct='yes' is-anonymous='yes' naming-typedef-id='type-id-178' visibility='default' id='type-id-179'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='si_signo' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='si_errno' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='si_code' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__pad0' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_sifields' type-id='type-id-180' visibility='default'/>
- </data-member>
- </class-decl>
- <union-decl name='__anonymous_union__' size-in-bits='896' is-anonymous='yes' visibility='default' id='type-id-180'>
- <data-member access='private'>
- <var-decl name='_pad' type-id='type-id-181' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='_kill' type-id='type-id-182' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='_timer' type-id='type-id-183' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='_rt' type-id='type-id-184' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='_sigchld' type-id='type-id-185' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='_sigfault' type-id='type-id-186' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='_sigpoll' type-id='type-id-187' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='_sigsys' type-id='type-id-188' visibility='default'/>
- </data-member>
- </union-decl>
-
- <array-type-def dimensions='1' type-id='type-id-8' size-in-bits='896' id='type-id-181'>
- <subrange length='28' type-id='type-id-33' id='type-id-189'/>
-
- </array-type-def>
- <class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-182'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='si_pid' type-id='type-id-190' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='si_uid' type-id='type-id-191' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__pid_t' type-id='type-id-8' id='type-id-190'/>
- <typedef-decl name='__uid_t' type-id='type-id-5' id='type-id-191'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-183'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='si_tid' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='si_overrun' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='si_sigval' type-id='type-id-192' visibility='default'/>
- </data-member>
- </class-decl>
- <union-decl name='sigval' size-in-bits='64' visibility='default' id='type-id-193'>
- <data-member access='private'>
- <var-decl name='sival_int' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='sival_ptr' type-id='type-id-7' visibility='default'/>
- </data-member>
- </union-decl>
- <typedef-decl name='__sigval_t' type-id='type-id-193' id='type-id-192'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-184'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='si_pid' type-id='type-id-190' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='si_uid' type-id='type-id-191' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='si_sigval' type-id='type-id-192' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='256' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-185'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='si_pid' type-id='type-id-190' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='si_uid' type-id='type-id-191' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='si_status' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='si_utime' type-id='type-id-194' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='si_stime' type-id='type-id-194' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__clock_t' type-id='type-id-54' id='type-id-194'/>
- <class-decl name='__anonymous_struct__' size-in-bits='256' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-186'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='si_addr' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='si_addr_lsb' type-id='type-id-55' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_bounds' type-id='type-id-195' visibility='default'/>
- </data-member>
- </class-decl>
- <union-decl name='__anonymous_union__' size-in-bits='128' is-anonymous='yes' visibility='default' id='type-id-195'>
- <data-member access='private'>
- <var-decl name='_addr_bnd' type-id='type-id-196' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='_pkey' type-id='type-id-40' visibility='default'/>
- </data-member>
- </union-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-196'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_lower' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_upper' type-id='type-id-7' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-187'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='si_band' type-id='type-id-54' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='si_fd' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-188'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_call_addr' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_syscall' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='_arch' type-id='type-id-5' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='siginfo_t' type-id='type-id-179' id='type-id-178'/>
- <pointer-type-def type-id='type-id-178' size-in-bits='64' id='type-id-197'/>
- <pointer-type-def type-id='type-id-198' size-in-bits='64' id='type-id-175'/>
- <typedef-decl name='__sigset_t' type-id='type-id-167' id='type-id-166'/>
- <pointer-type-def type-id='type-id-199' size-in-bits='64' id='type-id-173'/>
- <qualified-type-def type-id='type-id-171' const='yes' id='type-id-200'/>
- <pointer-type-def type-id='type-id-200' size-in-bits='64' id='type-id-201'/>
- <pointer-type-def type-id='type-id-171' size-in-bits='64' id='type-id-202'/>
- <function-decl name='sigaction' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-201'/>
- <parameter type-id='type-id-202'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fputc' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-150'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fflush' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-150'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <class-decl name='termios' size-in-bits='480' is-struct='yes' visibility='default' id='type-id-203'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='c_iflag' type-id='type-id-204' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='c_oflag' type-id='type-id-204' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='c_cflag' type-id='type-id-204' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='c_lflag' type-id='type-id-204' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='c_line' type-id='type-id-205' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='136'>
- <var-decl name='c_cc' type-id='type-id-206' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='416'>
- <var-decl name='c_ispeed' type-id='type-id-207' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='c_ospeed' type-id='type-id-207' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='tcflag_t' type-id='type-id-5' id='type-id-204'/>
- <typedef-decl name='cc_t' type-id='type-id-75' id='type-id-205'/>
-
- <array-type-def dimensions='1' type-id='type-id-205' size-in-bits='256' id='type-id-206'>
- <subrange length='32' type-id='type-id-33' id='type-id-208'/>
-
- </array-type-def>
- <typedef-decl name='speed_t' type-id='type-id-5' id='type-id-207'/>
- <pointer-type-def type-id='type-id-203' size-in-bits='64' id='type-id-209'/>
- <function-decl name='tcgetattr' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-209'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <qualified-type-def type-id='type-id-203' const='yes' id='type-id-210'/>
- <pointer-type-def type-id='type-id-210' size-in-bits='64' id='type-id-211'/>
- <function-decl name='tcsetattr' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-211'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='getpid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='kill' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fclose' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-150'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='dlopen' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-7'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-109'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='ZPROP_CONT' value='-2'/>
+ <enumerator name='ZPROP_INVAL' value='-1'/>
+ <enumerator name='ZFS_PROP_TYPE' value='0'/>
+ <enumerator name='ZFS_PROP_CREATION' value='1'/>
+ <enumerator name='ZFS_PROP_USED' value='2'/>
+ <enumerator name='ZFS_PROP_AVAILABLE' value='3'/>
+ <enumerator name='ZFS_PROP_REFERENCED' value='4'/>
+ <enumerator name='ZFS_PROP_COMPRESSRATIO' value='5'/>
+ <enumerator name='ZFS_PROP_MOUNTED' value='6'/>
+ <enumerator name='ZFS_PROP_ORIGIN' value='7'/>
+ <enumerator name='ZFS_PROP_QUOTA' value='8'/>
+ <enumerator name='ZFS_PROP_RESERVATION' value='9'/>
+ <enumerator name='ZFS_PROP_VOLSIZE' value='10'/>
+ <enumerator name='ZFS_PROP_VOLBLOCKSIZE' value='11'/>
+ <enumerator name='ZFS_PROP_RECORDSIZE' value='12'/>
+ <enumerator name='ZFS_PROP_MOUNTPOINT' value='13'/>
+ <enumerator name='ZFS_PROP_SHARENFS' value='14'/>
+ <enumerator name='ZFS_PROP_CHECKSUM' value='15'/>
+ <enumerator name='ZFS_PROP_COMPRESSION' value='16'/>
+ <enumerator name='ZFS_PROP_ATIME' value='17'/>
+ <enumerator name='ZFS_PROP_DEVICES' value='18'/>
+ <enumerator name='ZFS_PROP_EXEC' value='19'/>
+ <enumerator name='ZFS_PROP_SETUID' value='20'/>
+ <enumerator name='ZFS_PROP_READONLY' value='21'/>
+ <enumerator name='ZFS_PROP_ZONED' value='22'/>
+ <enumerator name='ZFS_PROP_SNAPDIR' value='23'/>
+ <enumerator name='ZFS_PROP_ACLMODE' value='24'/>
+ <enumerator name='ZFS_PROP_ACLINHERIT' value='25'/>
+ <enumerator name='ZFS_PROP_CREATETXG' value='26'/>
+ <enumerator name='ZFS_PROP_NAME' value='27'/>
+ <enumerator name='ZFS_PROP_CANMOUNT' value='28'/>
+ <enumerator name='ZFS_PROP_ISCSIOPTIONS' value='29'/>
+ <enumerator name='ZFS_PROP_XATTR' value='30'/>
+ <enumerator name='ZFS_PROP_NUMCLONES' value='31'/>
+ <enumerator name='ZFS_PROP_COPIES' value='32'/>
+ <enumerator name='ZFS_PROP_VERSION' value='33'/>
+ <enumerator name='ZFS_PROP_UTF8ONLY' value='34'/>
+ <enumerator name='ZFS_PROP_NORMALIZE' value='35'/>
+ <enumerator name='ZFS_PROP_CASE' value='36'/>
+ <enumerator name='ZFS_PROP_VSCAN' value='37'/>
+ <enumerator name='ZFS_PROP_NBMAND' value='38'/>
+ <enumerator name='ZFS_PROP_SHARESMB' value='39'/>
+ <enumerator name='ZFS_PROP_REFQUOTA' value='40'/>
+ <enumerator name='ZFS_PROP_REFRESERVATION' value='41'/>
+ <enumerator name='ZFS_PROP_GUID' value='42'/>
+ <enumerator name='ZFS_PROP_PRIMARYCACHE' value='43'/>
+ <enumerator name='ZFS_PROP_SECONDARYCACHE' value='44'/>
+ <enumerator name='ZFS_PROP_USEDSNAP' value='45'/>
+ <enumerator name='ZFS_PROP_USEDDS' value='46'/>
+ <enumerator name='ZFS_PROP_USEDCHILD' value='47'/>
+ <enumerator name='ZFS_PROP_USEDREFRESERV' value='48'/>
+ <enumerator name='ZFS_PROP_USERACCOUNTING' value='49'/>
+ <enumerator name='ZFS_PROP_STMF_SHAREINFO' value='50'/>
+ <enumerator name='ZFS_PROP_DEFER_DESTROY' value='51'/>
+ <enumerator name='ZFS_PROP_USERREFS' value='52'/>
+ <enumerator name='ZFS_PROP_LOGBIAS' value='53'/>
+ <enumerator name='ZFS_PROP_UNIQUE' value='54'/>
+ <enumerator name='ZFS_PROP_OBJSETID' value='55'/>
+ <enumerator name='ZFS_PROP_DEDUP' value='56'/>
+ <enumerator name='ZFS_PROP_MLSLABEL' value='57'/>
+ <enumerator name='ZFS_PROP_SYNC' value='58'/>
+ <enumerator name='ZFS_PROP_DNODESIZE' value='59'/>
+ <enumerator name='ZFS_PROP_REFRATIO' value='60'/>
+ <enumerator name='ZFS_PROP_WRITTEN' value='61'/>
+ <enumerator name='ZFS_PROP_CLONES' value='62'/>
+ <enumerator name='ZFS_PROP_LOGICALUSED' value='63'/>
+ <enumerator name='ZFS_PROP_LOGICALREFERENCED' value='64'/>
+ <enumerator name='ZFS_PROP_INCONSISTENT' value='65'/>
+ <enumerator name='ZFS_PROP_VOLMODE' value='66'/>
+ <enumerator name='ZFS_PROP_FILESYSTEM_LIMIT' value='67'/>
+ <enumerator name='ZFS_PROP_SNAPSHOT_LIMIT' value='68'/>
+ <enumerator name='ZFS_PROP_FILESYSTEM_COUNT' value='69'/>
+ <enumerator name='ZFS_PROP_SNAPSHOT_COUNT' value='70'/>
+ <enumerator name='ZFS_PROP_SNAPDEV' value='71'/>
+ <enumerator name='ZFS_PROP_ACLTYPE' value='72'/>
+ <enumerator name='ZFS_PROP_SELINUX_CONTEXT' value='73'/>
+ <enumerator name='ZFS_PROP_SELINUX_FSCONTEXT' value='74'/>
+ <enumerator name='ZFS_PROP_SELINUX_DEFCONTEXT' value='75'/>
+ <enumerator name='ZFS_PROP_SELINUX_ROOTCONTEXT' value='76'/>
+ <enumerator name='ZFS_PROP_RELATIME' value='77'/>
+ <enumerator name='ZFS_PROP_REDUNDANT_METADATA' value='78'/>
+ <enumerator name='ZFS_PROP_OVERLAY' value='79'/>
+ <enumerator name='ZFS_PROP_PREV_SNAP' value='80'/>
+ <enumerator name='ZFS_PROP_RECEIVE_RESUME_TOKEN' value='81'/>
+ <enumerator name='ZFS_PROP_ENCRYPTION' value='82'/>
+ <enumerator name='ZFS_PROP_KEYLOCATION' value='83'/>
+ <enumerator name='ZFS_PROP_KEYFORMAT' value='84'/>
+ <enumerator name='ZFS_PROP_PBKDF2_SALT' value='85'/>
+ <enumerator name='ZFS_PROP_PBKDF2_ITERS' value='86'/>
+ <enumerator name='ZFS_PROP_ENCRYPTION_ROOT' value='87'/>
+ <enumerator name='ZFS_PROP_KEY_GUID' value='88'/>
+ <enumerator name='ZFS_PROP_KEYSTATUS' value='89'/>
+ <enumerator name='ZFS_PROP_REMAPTXG' value='90'/>
+ <enumerator name='ZFS_PROP_SPECIAL_SMALL_BLOCKS' value='91'/>
+ <enumerator name='ZFS_PROP_IVSET_GUID' value='92'/>
+ <enumerator name='ZFS_PROP_REDACTED' value='93'/>
+ <enumerator name='ZFS_PROP_REDACT_SNAPS' value='94'/>
+ <enumerator name='ZFS_NUM_PROPS' value='95'/>
+ </enum-decl>
+ <typedef-decl name='zfs_prop_t' type-id='type-id-109' id='type-id-110'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-111'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='ZPROP_SRC_NONE' value='1'/>
+ <enumerator name='ZPROP_SRC_DEFAULT' value='2'/>
+ <enumerator name='ZPROP_SRC_TEMPORARY' value='4'/>
+ <enumerator name='ZPROP_SRC_LOCAL' value='8'/>
+ <enumerator name='ZPROP_SRC_INHERITED' value='16'/>
+ <enumerator name='ZPROP_SRC_RECEIVED' value='32'/>
+ </enum-decl>
+ <typedef-decl name='zprop_source_t' type-id='type-id-111' id='type-id-112'/>
+ <pointer-type-def type-id='type-id-112' size-in-bits='64' id='type-id-113'/>
+ <function-decl name='zfs_prop_get_numeric' mangled-name='zfs_prop_get_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_numeric'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-110' name='prop'/>
+ <parameter type-id='type-id-108' name='value'/>
+ <parameter type-id='type-id-113' name='src'/>
+ <parameter type-id='type-id-14' name='statbuf'/>
+ <parameter type-id='type-id-18' name='statlen'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='dlsym' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-84'/>
+ <function-decl name='zfs_prop_get_int' mangled-name='zfs_prop_get_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_int'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-110' name='prop'/>
<return type-id='type-id-7'/>
</function-decl>
- <function-decl name='fdopen' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-150'/>
- </function-decl>
- <function-decl name='dlerror' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='asprintf' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-88'/>
- <parameter type-id='type-id-84'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='mkostemps' mangled-name='mkostemps64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_get' mangled-name='zfs_prop_get' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-110' name='prop'/>
+ <parameter type-id='type-id-14' name='propbuf'/>
+ <parameter type-id='type-id-18' name='proplen'/>
+ <parameter type-id='type-id-113' name='src'/>
+ <parameter type-id='type-id-14' name='statbuf'/>
+ <parameter type-id='type-id-18' name='statlen'/>
+ <parameter type-id='type-id-9' name='literal'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='unlink' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_get_clones_nvl' mangled-name='zfs_get_clones_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_clones_nvl'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-19'/>
</function-decl>
- <function-decl name='rewind' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-150'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_prop_get_recvd' mangled-name='zfs_prop_get_recvd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_recvd'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-14' name='propbuf'/>
+ <parameter type-id='type-id-18' name='proplen'/>
+ <parameter type-id='type-id-9' name='literal'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-199'>
- <return type-id='type-id-6'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-176'>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-6'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-198'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-197'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-6'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_dataset.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zfs_type_to_name' mangled-name='zfs_type_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_type_to_name'>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_prop_inherit' mangled-name='zfs_prop_inherit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inherit'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-9' name='received'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-212'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='NAME_ERR_LEADING_SLASH' value='0'/>
- <enumerator name='NAME_ERR_EMPTY_COMPONENT' value='1'/>
- <enumerator name='NAME_ERR_TRAILING_SLASH' value='2'/>
- <enumerator name='NAME_ERR_INVALCHAR' value='3'/>
- <enumerator name='NAME_ERR_MULTIPLE_DELIMITERS' value='4'/>
- <enumerator name='NAME_ERR_NOLETTER' value='5'/>
- <enumerator name='NAME_ERR_RESERVED' value='6'/>
- <enumerator name='NAME_ERR_DISKLIKE' value='7'/>
- <enumerator name='NAME_ERR_TOOLONG' value='8'/>
- <enumerator name='NAME_ERR_SELF_REF' value='9'/>
- <enumerator name='NAME_ERR_PARENT_REF' value='10'/>
- <enumerator name='NAME_ERR_NO_AT' value='11'/>
- <enumerator name='NAME_ERR_NO_POUND' value='12'/>
- </enum-decl>
- <pointer-type-def type-id='type-id-212' size-in-bits='64' id='type-id-213'/>
- <function-decl name='entity_namecheck' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-213'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_set_list' mangled-name='zfs_prop_set_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set_list'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_name_valid' mangled-name='zfs_name_valid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_valid'>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_set' mangled-name='zfs_prop_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-84' name='propval'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_name_valid' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
+ <function-decl name='zfs_valid_proplist' mangled-name='zfs_valid_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_valid_proplist'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <parameter type-id='type-id-19' name='nvl'/>
+ <parameter type-id='type-id-7' name='zoned'/>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-4' name='zpool_hdl'/>
+ <parameter type-id='type-id-9' name='key_params_ok'/>
+ <parameter type-id='type-id-84' name='errbuf'/>
+ <return type-id='type-id-19'/>
</function-decl>
- <function-decl name='zpool_free_handles' mangled-name='zpool_free_handles' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_free_handles'>
- <parameter type-id='type-id-10' name='hdl'/>
- <return type-id='type-id-6'/>
+ <pointer-type-def type-id='type-id-2' size-in-bits='64' id='type-id-114'/>
+ <function-decl name='zfs_spa_version' mangled-name='zfs_spa_version' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-114' name='spa_version'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_close' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <return type-id='type-id-6'/>
+ <function-decl name='libzfs_mnttab_remove' mangled-name='libzfs_mnttab_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_remove'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='fsname'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_refresh_properties' mangled-name='zfs_refresh_properties' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_refresh_properties'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-6'/>
+ <function-decl name='libzfs_mnttab_add' mangled-name='libzfs_mnttab_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_add'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='special'/>
+ <parameter type-id='type-id-84' name='mountp'/>
+ <parameter type-id='type-id-84' name='mntopts'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_get_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <return type-id='type-id-84'/>
+ <function-decl name='libzfs_mnttab_cache' mangled-name='libzfs_mnttab_cache' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_cache'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-9' name='enable'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_open_canfail' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-126'/>
+ <function-decl name='libzfs_mnttab_fini' mangled-name='libzfs_mnttab_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_fini'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_handle_dup' mangled-name='zfs_handle_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_handle_dup'>
- <parameter type-id='type-id-135' name='zhp_orig'/>
- <return type-id='type-id-135'/>
+ <function-decl name='libzfs_mnttab_init' mangled-name='libzfs_mnttab_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_init'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <return type-id='type-id-1'/>
</function-decl>
<function-decl name='zfs_close' mangled-name='zfs_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_close'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='zfs_bookmark_exists' mangled-name='zfs_bookmark_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_bookmark_exists'>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-16'/>
- </function-decl>
- <function-decl name='lzc_get_bookmarks' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-1'/>
</function-decl>
<function-decl name='zfs_open' mangled-name='zfs_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_open'>
- <parameter type-id='type-id-10' name='hdl'/>
+ <parameter type-id='type-id-16' name='hdl'/>
<parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-8' name='types'/>
- <return type-id='type-id-135'/>
- </function-decl>
- <function-decl name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-95'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='libzfs_mnttab_init' mangled-name='libzfs_mnttab_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_init'>
- <parameter type-id='type-id-10' name='hdl'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='type-id-2' name='types'/>
+ <return type-id='type-id-76'/>
</function-decl>
- <pointer-type-def type-id='type-id-51' size-in-bits='64' id='type-id-214'/>
- <union-decl name='__anonymous_union__' size-in-bits='32' is-anonymous='yes' visibility='default' id='type-id-215'>
- <data-member access='private'>
- <var-decl name='__size' type-id='type-id-216' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__align' type-id='type-id-8' visibility='default'/>
- </data-member>
- </union-decl>
-
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='32' id='type-id-216'>
- <subrange length='4' type-id='type-id-33' id='type-id-217'/>
-
- </array-type-def>
- <qualified-type-def type-id='type-id-215' const='yes' id='type-id-218'/>
- <pointer-type-def type-id='type-id-218' size-in-bits='64' id='type-id-219'/>
- <function-decl name='pthread_mutex_init' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-214'/>
- <parameter type-id='type-id-219'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <pointer-type-def type-id='type-id-60' size-in-bits='64' id='type-id-220'/>
- <function-decl name='avl_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-220'/>
- <parameter type-id='type-id-62'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_bookmark_exists' mangled-name='zfs_bookmark_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_bookmark_exists'>
+ <parameter type-id='type-id-84' name='path'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='libzfs_mnttab_fini' mangled-name='libzfs_mnttab_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_fini'>
- <parameter type-id='type-id-10' name='hdl'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_handle_dup' mangled-name='zfs_handle_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_handle_dup'>
+ <parameter type-id='type-id-76' name='zhp_orig'/>
+ <return type-id='type-id-76'/>
</function-decl>
- <function-decl name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-220'/>
- <parameter type-id='type-id-103'/>
- <return type-id='type-id-7'/>
+ <function-decl name='zfs_refresh_properties' mangled-name='zfs_refresh_properties' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_refresh_properties'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='avl_destroy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-220'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zpool_free_handles' mangled-name='zpool_free_handles' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_free_handles'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='pthread_mutex_destroy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-214'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_name_valid' mangled-name='zfs_name_valid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_valid'>
+ <parameter type-id='type-id-84' name='name'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='libzfs_mnttab_cache' mangled-name='libzfs_mnttab_cache' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_cache'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-16' name='enable'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_type_to_name' mangled-name='zfs_type_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_type_to_name'>
+ <parameter type-id='type-id-66' name='type'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-221'>
+ <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-115'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-17' visibility='default'/>
+ <var-decl name='mnt_special' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-17' visibility='default'/>
+ <var-decl name='mnt_mountp' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-17' visibility='default'/>
+ <var-decl name='mnt_fstype' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-17' visibility='default'/>
+ <var-decl name='mnt_mntopts' type-id='type-id-14' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-221' size-in-bits='64' id='type-id-222'/>
+ <pointer-type-def type-id='type-id-115' size-in-bits='64' id='type-id-116'/>
<function-decl name='libzfs_mnttab_find' mangled-name='libzfs_mnttab_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_find'>
- <parameter type-id='type-id-10' name='hdl'/>
+ <parameter type-id='type-id-16' name='hdl'/>
<parameter type-id='type-id-84' name='fsname'/>
- <parameter type-id='type-id-222' name='entry'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-116' name='entry'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='pthread_mutex_lock' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-214'/>
- <return type-id='type-id-8'/>
+ <pointer-type-def type-id='type-id-14' size-in-bits='64' id='type-id-117'/>
+ <function-decl name='getprop_uint64' mangled-name='getprop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getprop_uint64'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-110' name='prop'/>
+ <parameter type-id='type-id-117' name='source'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='avl_numnodes' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-220'/>
- <return type-id='type-id-35'/>
+ <function-decl name='zfs_dataset_exists' mangled-name='zfs_dataset_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_exists'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-66' name='types'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='avl_find' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-220'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-102'/>
- <return type-id='type-id-7'/>
+ <function-decl name='lzc_wait_fs' mangled-name='lzc_wait_fs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='getmntany' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-150'/>
- <parameter type-id='type-id-222'/>
- <parameter type-id='type-id-222'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_standard_error_fmt' mangled-name='zfs_standard_error_fmt' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='pthread_mutex_unlock' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-214'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvlist_lookup_nvlist_array' mangled-name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='avl_add' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-220'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zpool_get_config' mangled-name='zpool_get_config' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_mnttab_add' mangled-name='libzfs_mnttab_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_add'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='special'/>
- <parameter type-id='type-id-84' name='mountp'/>
- <parameter type-id='type-id-84' name='mntopts'/>
- <return type-id='type-id-6'/>
+ <function-decl name='strtol' mangled-name='strtol' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_mnttab_remove' mangled-name='libzfs_mnttab_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_remove'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='fsname'/>
- <return type-id='type-id-6'/>
+ <function-decl name='lzc_get_holds' mangled-name='lzc_get_holds' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='avl_remove' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-220'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-6'/>
+ <function-decl name='nvlist_size' mangled-name='nvlist_size' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-8' size-in-bits='64' id='type-id-223'/>
- <function-decl name='zfs_spa_version' mangled-name='zfs_spa_version' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-223' name='spa_version'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvlist_pack' mangled-name='nvlist_pack' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_valid_proplist' mangled-name='zfs_valid_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_valid_proplist'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-13' name='type'/>
- <parameter type-id='type-id-15' name='nvl'/>
- <parameter type-id='type-id-22' name='zoned'/>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-11' name='zpool_hdl'/>
- <parameter type-id='type-id-16' name='key_params_ok'/>
- <parameter type-id='type-id-84' name='errbuf'/>
- <return type-id='type-id-15'/>
+ <function-decl name='nvlist_unpack' mangled-name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-107'/>
- <parameter type-id='type-id-5'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='strerror' mangled-name='strerror' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_valid_for_type' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-76'/>
- <parameter type-id='type-id-50'/>
- <return type-id='type-id-50'/>
+ <function-decl name='nvlist_empty' mangled-name='nvlist_empty' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_readonly' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-50'/>
+ <function-decl name='fnvlist_free' mangled-name='fnvlist_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_setonce' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-50'/>
+ <function-decl name='lzc_release' mangled-name='lzc_release' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_encryption_key_param' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-50'/>
+ <function-decl name='fnvpair_value_int32' mangled-name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zprop_parse_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-133'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-76'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-88'/>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fnvlist_add_boolean' mangled-name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_user' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
+ <function-decl name='fnvlist_add_nvlist' mangled-name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_type' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <return type-id='type-id-132'/>
+ <function-decl name='lzc_hold' mangled-name='lzc_hold' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <parameter type-id='type-id-88'/>
- <return type-id='type-id-8'/>
+ <function-decl name='ioctl' mangled-name='ioctl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_userquota' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
+ <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_nicestrtonum' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-102'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zcmd_write_src_nvlist' mangled-name='zcmd_write_src_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <parameter type-id='type-id-102'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvpair_type' mangled-name='nvpair_type' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='mountpoint_namecheck' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-213'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvlist_remove' mangled-name='nvlist_remove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_valid_keylocation' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-50'/>
- <return type-id='type-id-50'/>
+ <function-decl name='zprop_expand_list' mangled-name='zprop_expand_list' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_prop_get_feature' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
+ <function-decl name='changelist_gather' mangled-name='changelist_gather' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_parse_options' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-89'/>
- <return type-id='type-id-8'/>
+ <function-decl name='changelist_haszonedchild' mangled-name='changelist_haszonedchild' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-8'/>
+ <function-decl name='changelist_free' mangled-name='changelist_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_written' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
+ <function-decl name='changelist_rename' mangled-name='changelist_rename' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-6'/>
+ <function-decl name='changelist_postfix' mangled-name='changelist_postfix' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='passwd' size-in-bits='384' is-struct='yes' visibility='default' id='type-id-224'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pw_name' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pw_passwd' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='pw_uid' type-id='type-id-191' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='pw_gid' type-id='type-id-225' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='pw_gecos' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='pw_dir' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='pw_shell' type-id='type-id-17' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__gid_t' type-id='type-id-5' id='type-id-225'/>
- <pointer-type-def type-id='type-id-224' size-in-bits='64' id='type-id-226'/>
- <function-decl name='getpwnam' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-226'/>
+ <function-decl name='changelist_prefix' mangled-name='changelist_prefix' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='group' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-227'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='gr_name' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='gr_passwd' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='gr_gid' type-id='type-id-225' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='gr_mem' type-id='type-id-88' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-227' size-in-bits='64' id='type-id-228'/>
- <function-decl name='getgrnam' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-228'/>
+ <function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <typedef-decl name='zfs_prop_t' type-id='type-id-85' id='type-id-229'/>
- <function-decl name='zfs_prop_get_int' mangled-name='zfs_prop_get_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_int'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-22'/>
+ <function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_set' mangled-name='zfs_prop_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-84' name='propval'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_rollback_to' mangled-name='lzc_rollback_to' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_set_list' mangled-name='zfs_prop_set_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set_list'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-15' name='props'/>
- <return type-id='type-id-8'/>
+ <function-decl name='changelist_remove' mangled-name='changelist_remove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-6'/>
+ <function-decl name='strcspn' mangled-name='strcspn' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zvol_volsize_to_reservation' mangled-name='zvol_volsize_to_reservation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zvol_volsize_to_reservation'>
- <parameter type-id='type-id-11' name='zph'/>
- <parameter type-id='type-id-22' name='volsize'/>
- <parameter type-id='type-id-15' name='props'/>
- <return type-id='type-id-22'/>
- </function-decl>
- <function-decl name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <return type-id='type-id-35'/>
- </function-decl>
- <class-decl name='prop_changelist' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-230'/>
- <pointer-type-def type-id='type-id-230' size-in-bits='64' id='type-id-231'/>
- <function-decl name='changelist_gather' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-85'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-231'/>
- </function-decl>
- <function-decl name='changelist_haszonedchild' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-231'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='changelist_prefix' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-231'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zcmd_write_src_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-122'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='changelist_free' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-231'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='changelist_postfix' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-231'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_setprop_error' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-85'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_snapshot' mangled-name='lzc_snapshot' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_inherit' mangled-name='zfs_prop_inherit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inherit'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-16' name='received'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_promote' mangled-name='lzc_promote' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_inheritable' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-50'/>
+ <function-decl name='zfs_crypto_clone_check' mangled-name='zfs_crypto_clone_check' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='getprop_uint64' mangled-name='getprop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getprop_uint64'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-229' name='prop'/>
- <parameter type-id='type-id-88' name='source'/>
- <return type-id='type-id-22'/>
+ <function-decl name='lzc_clone' mangled-name='lzc_clone' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_default_numeric' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-35'/>
+ <function-decl name='lzc_destroy_snaps' mangled-name='lzc_destroy_snaps' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_get_recvd' mangled-name='zfs_prop_get_recvd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_recvd'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-17' name='propbuf'/>
- <parameter type-id='type-id-28' name='proplen'/>
- <parameter type-id='type-id-16' name='literal'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_destroy_bookmarks' mangled-name='lzc_destroy_bookmarks' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <typedef-decl name='zprop_source_t' type-id='type-id-86' id='type-id-232'/>
- <pointer-type-def type-id='type-id-232' size-in-bits='64' id='type-id-233'/>
- <function-decl name='zfs_prop_get' mangled-name='zfs_prop_get' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-229' name='prop'/>
- <parameter type-id='type-id-17' name='propbuf'/>
- <parameter type-id='type-id-28' name='proplen'/>
- <parameter type-id='type-id-233' name='src'/>
- <parameter type-id='type-id-17' name='statbuf'/>
- <parameter type-id='type-id-28' name='statlen'/>
- <parameter type-id='type-id-16' name='literal'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-234'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='PROP_TYPE_NUMBER' value='0'/>
- <enumerator name='PROP_TYPE_STRING' value='1'/>
- <enumerator name='PROP_TYPE_INDEX' value='2'/>
- </enum-decl>
- <function-decl name='zfs_prop_get_type' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-234'/>
+ <function-decl name='lzc_destroy' mangled-name='lzc_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_prop_default_numeric' mangled-name='zfs_prop_default_numeric' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='tm' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-235'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tm_sec' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='tm_min' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tm_hour' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='tm_mday' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='tm_mon' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='tm_year' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='tm_wday' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='tm_yday' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='tm_isdst' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='tm_gmtoff' type-id='type-id-54' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='tm_zone' type-id='type-id-84' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-235' size-in-bits='64' id='type-id-236'/>
- <qualified-type-def type-id='type-id-54' const='yes' id='type-id-237'/>
- <pointer-type-def type-id='type-id-237' size-in-bits='64' id='type-id-238'/>
- <function-decl name='localtime_r' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-238'/>
- <parameter type-id='type-id-236'/>
- <return type-id='type-id-236'/>
- </function-decl>
- <qualified-type-def type-id='type-id-235' const='yes' id='type-id-239'/>
- <pointer-type-def type-id='type-id-239' size-in-bits='64' id='type-id-240'/>
- <function-decl name='strftime' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-240'/>
- <return type-id='type-id-35'/>
- </function-decl>
- <function-decl name='zpool_get_prop' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-142'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-87'/>
- <parameter type-id='type-id-50'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <pointer-type-def type-id='type-id-84' size-in-bits='64' id='type-id-241'/>
- <function-decl name='zfs_prop_index_to_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-85'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-241'/>
- <return type-id='type-id-8'/>
+ <function-decl name='dataset_nestcheck' mangled-name='dataset_nestcheck' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_clones_nvl' mangled-name='zfs_get_clones_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_clones_nvl'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-15'/>
+ <function-decl name='zfs_crypto_create' mangled-name='zfs_crypto_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='strsep' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-88'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-17'/>
+ <function-decl name='lzc_create' mangled-name='lzc_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='strdup' mangled-name='strdup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_share' mangled-name='zfs_share' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_commit_all_shares' mangled-name='zfs_commit_all_shares' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-8'/>
+ <function-decl name='__builtin___strncpy_chk' mangled-name='__strncpy_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
+ <function-decl name='strrchr' mangled-name='strrchr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_nicebytes' mangled-name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_channel_program_nosync' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-104'/>
+ <function-decl name='zfs_nicenum' mangled-name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-54' size-in-bits='64' id='type-id-242'/>
- <function-decl name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-242'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_valid_for_type' mangled-name='zfs_prop_valid_for_type' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_default_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_error_fmt' mangled-name='zfs_error_fmt' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-17'/>
+ <function-decl name='localtime_r' mangled-name='localtime_r' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-102' size-in-bits='64' id='type-id-243'/>
- <pointer-type-def type-id='type-id-5' size-in-bits='64' id='type-id-244'/>
- <function-decl name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-243'/>
- <parameter type-id='type-id-244'/>
- <return type-id='type-id-8'/>
+ <function-decl name='strftime' mangled-name='strftime' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_empty' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-50'/>
+ <function-decl name='zpool_get_prop' mangled-name='zpool_get_prop' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='mntent' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-245'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_fsname' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_dir' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_type' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_opts' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='mnt_freq' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='mnt_passno' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <qualified-type-def type-id='type-id-245' const='yes' id='type-id-246'/>
- <pointer-type-def type-id='type-id-246' size-in-bits='64' id='type-id-247'/>
- <function-decl name='hasmntopt' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-247'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-17'/>
+ <function-decl name='__builtin_snprintf' mangled-name='snprintf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-22' size-in-bits='64' id='type-id-248'/>
- <function-decl name='zfs_prop_get_numeric' mangled-name='zfs_prop_get_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_numeric'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-229' name='prop'/>
- <parameter type-id='type-id-248' name='value'/>
- <parameter type-id='type-id-233' name='src'/>
- <parameter type-id='type-id-17' name='statbuf'/>
- <parameter type-id='type-id-28' name='statlen'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_error_fmt' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvlist_lookup_uint64_array' mangled-name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_get_userquota_int' mangled-name='zfs_prop_get_userquota_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota_int'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-248' name='propvalue'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_get_type' mangled-name='zfs_prop_get_type' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_get_userquota' mangled-name='zfs_prop_get_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-17' name='propbuf'/>
- <parameter type-id='type-id-8' name='proplen'/>
- <parameter type-id='type-id-16' name='literal'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_index_to_string' mangled-name='zfs_prop_index_to_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_get_written_int' mangled-name='zfs_prop_get_written_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written_int'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-248' name='propvalue'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_readonly' mangled-name='zfs_prop_readonly' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_get_written' mangled-name='zfs_prop_get_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-17' name='propbuf'/>
- <parameter type-id='type-id-8' name='proplen'/>
- <parameter type-id='type-id-16' name='literal'/>
- <return type-id='type-id-8'/>
+ <function-decl name='abort' mangled-name='abort' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <qualified-type-def type-id='type-id-134' const='yes' id='type-id-249'/>
- <pointer-type-def type-id='type-id-249' size-in-bits='64' id='type-id-250'/>
- <function-decl name='zfs_get_name' mangled-name='zfs_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_name'>
- <parameter type-id='type-id-250' name='zhp'/>
- <return type-id='type-id-84'/>
+ <function-decl name='nvlist_lookup_int64' mangled-name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_pool_name' mangled-name='zfs_get_pool_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_name'>
- <parameter type-id='type-id-250' name='zhp'/>
- <return type-id='type-id-84'/>
+ <function-decl name='__fprintf_chk' mangled-name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_type' mangled-name='zfs_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_type'>
- <parameter type-id='type-id-250' name='zhp'/>
- <return type-id='type-id-13'/>
+ <function-decl name='fnvlist_add_string' mangled-name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_underlying_type' mangled-name='zfs_get_underlying_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_type'>
- <parameter type-id='type-id-250' name='zhp'/>
- <return type-id='type-id-13'/>
+ <function-decl name='lzc_channel_program_nosync' mangled-name='lzc_channel_program_nosync' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_parent_name' mangled-name='zfs_parent_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parent_name'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-17' name='buf'/>
- <parameter type-id='type-id-28' name='buflen'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fnvlist_lookup_nvlist' mangled-name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_dataset_exists' mangled-name='zfs_dataset_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_exists'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-13' name='types'/>
- <return type-id='type-id-16'/>
+ <function-decl name='strsep' mangled-name='strsep' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_create' mangled-name='zfs_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-13' name='type'/>
- <parameter type-id='type-id-15' name='props'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_share' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-8'/>
+ <function-decl name='hasmntopt' mangled-name='hasmntopt' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_commit_all_shares' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_prop_setonce' mangled-name='zfs_prop_setonce' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='dataset_nestcheck' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_inheritable' mangled-name='zfs_prop_inheritable' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_open' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-126'/>
- </function-decl>
- <pointer-type-def type-id='type-id-73' size-in-bits='64' id='type-id-251'/>
- <function-decl name='zfs_crypto_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-251'/>
- <parameter type-id='type-id-244'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <enum-decl name='lzc_dataset_type' id='type-id-252'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='LZC_DATSET_TYPE_ZFS' value='2'/>
- <enumerator name='LZC_DATSET_TYPE_ZVOL' value='3'/>
- </enum-decl>
- <function-decl name='lzc_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-252'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_user' mangled-name='zfs_prop_user' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_create_ancestors' mangled-name='zfs_create_ancestors' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create_ancestors'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_setprop_error' mangled-name='zfs_setprop_error' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_destroy' mangled-name='zfs_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-16' name='defer'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='lzc_destroy_bookmarks' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='lzc_destroy_snaps' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_standard_error_fmt' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fnvlist_add_uint64' mangled-name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_destroy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fnvpair_value_uint64' mangled-name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_destroy_snaps' mangled-name='zfs_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-17' name='snapname'/>
- <parameter type-id='type-id-16' name='defer'/>
- <return type-id='type-id-8'/>
+ <function-decl name='__asprintf_chk' mangled-name='__asprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_destroy_snaps_nvl' mangled-name='zfs_destroy_snaps_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps_nvl'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-15' name='snaps'/>
- <parameter type-id='type-id-16' name='defer'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvpair_value_uint64' mangled-name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_exists' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
+ <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvpair_value_string' mangled-name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_nicestrtonum' mangled-name='zfs_nicestrtonum' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_clone' mangled-name='zfs_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_clone'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='target'/>
- <parameter type-id='type-id-15' name='props'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_prop_get_feature' mangled-name='zpool_prop_get_feature' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_crypto_clone_check' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-8'/>
+ <function-decl name='mountpoint_namecheck' mangled-name='mountpoint_namecheck' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_clone' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_parse_options' mangled-name='zfs_parse_options' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_promote' mangled-name='zfs_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_promote'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_encryption_key_param' mangled-name='zfs_prop_encryption_key_param' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_promote' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zprop_parse_value' mangled-name='zprop_parse_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_snapshot_nvl' mangled-name='zfs_snapshot_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot_nvl'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-15' name='snaps'/>
- <parameter type-id='type-id-15' name='props'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_userquota' mangled-name='zfs_prop_userquota' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_snapshot' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_written' mangled-name='zfs_prop_written' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_snapshot' mangled-name='zfs_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-16' name='recursive'/>
- <parameter type-id='type-id-15' name='props'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_valid_keylocation' mangled-name='zfs_prop_valid_keylocation' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_rollback' mangled-name='zfs_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rollback'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-135' name='snap'/>
- <parameter type-id='type-id-16' name='force'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-95'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
+ <function-decl name='pthread_mutex_lock' mangled-name='pthread_mutex_lock' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_rollback_to' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='renameflags' size-in-bits='32' is-struct='yes' visibility='default' id='type-id-253'>
- <data-member access='public' layout-offset-in-bits='31'>
- <var-decl name='recursive' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='30'>
- <var-decl name='nounmount' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='29'>
- <var-decl name='forceunmount' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='renameflags_t' type-id='type-id-253' id='type-id-254'/>
- <function-decl name='zfs_rename' mangled-name='zfs_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rename'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='target'/>
- <parameter type-id='type-id-254' name='flags'/>
- <return type-id='type-id-8'/>
+ <function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='changelist_rename' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-231'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
+ <function-decl name='pthread_mutex_unlock' mangled-name='pthread_mutex_unlock' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_all_props' mangled-name='zfs_get_all_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_all_props'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-15'/>
+ <function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_recvd_props' mangled-name='zfs_get_recvd_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_recvd_props'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-15'/>
+ <function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_user_props' mangled-name='zfs_get_user_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_user_props'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-15'/>
+ <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='zprop_list' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-255'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pl_prop' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pl_user_prop' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='pl_next' type-id='type-id-256' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='pl_all' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='pl_width' type-id='type-id-28' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='pl_recvd_width' type-id='type-id-28' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='pl_fixed' type-id='type-id-16' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-255' size-in-bits='64' id='type-id-256'/>
- <typedef-decl name='zprop_list_t' type-id='type-id-255' id='type-id-257'/>
- <pointer-type-def type-id='type-id-257' size-in-bits='64' id='type-id-258'/>
- <pointer-type-def type-id='type-id-258' size-in-bits='64' id='type-id-259'/>
- <function-decl name='zfs_expand_proplist' mangled-name='zfs_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_expand_proplist'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-259' name='plp'/>
- <parameter type-id='type-id-16' name='received'/>
- <parameter type-id='type-id-16' name='literal'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <pointer-type-def type-id='type-id-256' size-in-bits='64' id='type-id-260'/>
- <function-decl name='zprop_expand_list' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-260'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-8'/>
+ <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prune_proplist' mangled-name='zfs_prune_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prune_proplist'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-18' name='props'/>
- <return type-id='type-id-6'/>
+ <function-decl name='pthread_mutex_destroy' mangled-name='pthread_mutex_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_remove' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-132'/>
- <return type-id='type-id-8'/>
+ <function-decl name='pthread_mutex_init' mangled-name='pthread_mutex_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_smb_acl_add' mangled-name='zfs_smb_acl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_add'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-17' name='dataset'/>
- <parameter type-id='type-id-17' name='path'/>
- <parameter type-id='type-id-17' name='resource'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='ioctl' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-35'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-8'/>
+ <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_smb_acl_remove' mangled-name='zfs_smb_acl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_remove'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-17' name='dataset'/>
- <parameter type-id='type-id-17' name='path'/>
- <parameter type-id='type-id-17' name='resource'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_get_bookmarks' mangled-name='lzc_get_bookmarks' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_smb_acl_purge' mangled-name='zfs_smb_acl_purge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_purge'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-17' name='dataset'/>
- <parameter type-id='type-id-17' name='path'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_get_name' mangled-name='zpool_get_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_smb_acl_rename' mangled-name='zfs_smb_acl_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_rename'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-17' name='dataset'/>
- <parameter type-id='type-id-17' name='path'/>
- <parameter type-id='type-id-17' name='oldname'/>
- <parameter type-id='type-id-17' name='newname'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-261'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='ZFS_PROP_USERUSED' value='0'/>
- <enumerator name='ZFS_PROP_USERQUOTA' value='1'/>
- <enumerator name='ZFS_PROP_GROUPUSED' value='2'/>
- <enumerator name='ZFS_PROP_GROUPQUOTA' value='3'/>
- <enumerator name='ZFS_PROP_USEROBJUSED' value='4'/>
- <enumerator name='ZFS_PROP_USEROBJQUOTA' value='5'/>
- <enumerator name='ZFS_PROP_GROUPOBJUSED' value='6'/>
- <enumerator name='ZFS_PROP_GROUPOBJQUOTA' value='7'/>
- <enumerator name='ZFS_PROP_PROJECTUSED' value='8'/>
- <enumerator name='ZFS_PROP_PROJECTQUOTA' value='9'/>
- <enumerator name='ZFS_PROP_PROJECTOBJUSED' value='10'/>
- <enumerator name='ZFS_PROP_PROJECTOBJQUOTA' value='11'/>
- <enumerator name='ZFS_NUM_USERQUOTA_PROPS' value='12'/>
- </enum-decl>
- <typedef-decl name='zfs_userquota_prop_t' type-id='type-id-261' id='type-id-262'/>
- <typedef-decl name='uid_t' type-id='type-id-191' id='type-id-263'/>
- <pointer-type-def type-id='type-id-264' size-in-bits='64' id='type-id-265'/>
- <typedef-decl name='zfs_userspace_cb_t' type-id='type-id-265' id='type-id-266'/>
- <function-decl name='zfs_userspace' mangled-name='zfs_userspace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_userspace'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-262' name='type'/>
- <parameter type-id='type-id-266' name='func'/>
- <parameter type-id='type-id-7' name='arg'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_open_canfail' mangled-name='zpool_open_canfail' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_hold' mangled-name='zfs_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='snapname'/>
- <parameter type-id='type-id-84' name='tag'/>
- <parameter type-id='type-id-16' name='recursive'/>
- <parameter type-id='type-id-8' name='cleanup_fd'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_name_valid' mangled-name='zpool_name_valid' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_hold_nvl' mangled-name='zfs_hold_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold_nvl'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-8' name='cleanup_fd'/>
- <parameter type-id='type-id-15' name='holds'/>
- <return type-id='type-id-8'/>
+ <function-decl name='strtoul' mangled-name='strtoul' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_hold' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
+ <function-decl name='getgrnam' mangled-name='getgrnam' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_release' mangled-name='zfs_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_release'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='snapname'/>
- <parameter type-id='type-id-84' name='tag'/>
- <parameter type-id='type-id-16' name='recursive'/>
- <return type-id='type-id-8'/>
+ <function-decl name='getpwnam' mangled-name='getpwnam' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_release' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_default_string' mangled-name='zfs_prop_default_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_get_holds' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fnvlist_lookup_string' mangled-name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-6'/>
+ <function-decl name='strstr' mangled-name='strstr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_fsacl' mangled-name='zfs_get_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_fsacl'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-105' name='nvl'/>
- <return type-id='type-id-8'/>
+ <function-decl name='entity_namecheck' mangled-name='entity_namecheck' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-107'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_exists' mangled-name='lzc_exists' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_set_fsacl' mangled-name='zfs_set_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_set_fsacl'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-16' name='un'/>
- <parameter type-id='type-id-15' name='nvl'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='nvlist_size' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='nvlist_pack' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-88'/>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvlist_add_boolean' mangled-name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_holds' mangled-name='zfs_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_holds'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-105' name='nvl'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zpool_get_config' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-104'/>
- </function-decl>
- <pointer-type-def type-id='type-id-107' size-in-bits='64' id='type-id-267'/>
- <function-decl name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-267'/>
- <parameter type-id='type-id-244'/>
- <return type-id='type-id-8'/>
+ <function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-268'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
- <enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
- </enum-decl>
- <typedef-decl name='zfs_wait_activity_t' type-id='type-id-268' id='type-id-269'/>
- <function-decl name='zfs_wait_status' mangled-name='zfs_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_wait_status'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-269' name='activity'/>
- <parameter type-id='type-id-106' name='missing'/>
- <parameter type-id='type-id-106' name='waited'/>
- <return type-id='type-id-8'/>
+ <function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_wait_fs' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-268'/>
- <parameter type-id='type-id-213'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='changelist_remove' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-231'/>
+ <function-type size-in-bits='64' id='type-id-96'>
+ <parameter type-id='type-id-13'/>
<parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-264'>
+ <parameter type-id='type-id-95'/>
<parameter type-id='type-id-7'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-263'/>
- <parameter type-id='type-id-22'/>
- <return type-id='type-id-8'/>
+ <return type-id='type-id-2'/>
</function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_diff.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='libzfs_diff.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
<function-decl name='zfs_show_diffs' mangled-name='zfs_show_diffs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_show_diffs'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-8' name='outfd'/>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-2' name='outfd'/>
<parameter type-id='type-id-84' name='fromsnap'/>
<parameter type-id='type-id-84' name='tosnap'/>
- <parameter type-id='type-id-8' name='flags'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-2' name='flags'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_validate_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-50'/>
- <return type-id='type-id-8'/>
+ <function-decl name='__builtin_strncpy' mangled-name='strncpy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_asprintf' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-17'/>
+ <function-decl name='zfs_asprintf' mangled-name='zfs_asprintf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='is_mounted' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-88'/>
- <return type-id='type-id-50'/>
+ <function-decl name='zfs_validate_name' mangled-name='zfs_validate_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='differ_info' size-in-bits='9024' is-struct='yes' visibility='default' id='type-id-270'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zhp' type-id='type-id-135' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='fromsnap' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='frommnt' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='tosnap' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='tomnt' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='ds' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='dsmnt' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='tmpsnap' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='errbuf' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8704'>
- <var-decl name='isclone' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8736'>
- <var-decl name='scripted' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8768'>
- <var-decl name='classify' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8800'>
- <var-decl name='timestamped' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8832'>
- <var-decl name='shares' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8896'>
- <var-decl name='zerr' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8928'>
- <var-decl name='cleanupfd' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8960'>
- <var-decl name='outputfd' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8992'>
- <var-decl name='datafd' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-270' size-in-bits='64' id='type-id-271'/>
- <function-decl name='find_shares_object' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-271'/>
- <return type-id='type-id-8'/>
+ <function-decl name='find_shares_object' mangled-name='find_shares_object' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='pipe2' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-223'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='pipe2' mangled-name='pipe2' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <union-decl name='pthread_attr_t' size-in-bits='448' visibility='default' id='type-id-272'>
- <data-member access='private'>
- <var-decl name='__size' type-id='type-id-273' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__align' type-id='type-id-54' visibility='default'/>
- </data-member>
- </union-decl>
-
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='448' id='type-id-273'>
- <subrange length='56' type-id='type-id-33' id='type-id-274'/>
-
- </array-type-def>
- <qualified-type-def type-id='type-id-272' const='yes' id='type-id-275'/>
- <pointer-type-def type-id='type-id-275' size-in-bits='64' id='type-id-276'/>
- <pointer-type-def type-id='type-id-277' size-in-bits='64' id='type-id-278'/>
- <function-decl name='pthread_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-276'/>
- <parameter type-id='type-id-278'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-decl name='pthread_create' mangled-name='pthread_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='pthread_join' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-103'/>
- <return type-id='type-id-8'/>
+ <function-decl name='pthread_cancel' mangled-name='pthread_cancel' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='pthread_cancel' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
+ <function-decl name='pthread_join' mangled-name='pthread_join' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fdopen' mangled-name='fdopen' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__builtin_fwrite' mangled-name='fwrite' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='is_mounted' mangled-name='is_mounted' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-277'>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-7'/>
- </function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_import.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-279'>
+ <abi-instr version='1.0' address-size='64' path='libzfs_import.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-118'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pco_refresh_config' type-id='type-id-280' visibility='default'/>
+ <var-decl name='pco_refresh_config' type-id='type-id-119' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pco_pool_active' type-id='type-id-281' visibility='default'/>
+ <var-decl name='pco_pool_active' type-id='type-id-120' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='refresh_config_func_t' type-id='type-id-282' id='type-id-283'/>
- <pointer-type-def type-id='type-id-283' size-in-bits='64' id='type-id-280'/>
- <typedef-decl name='pool_active_func_t' type-id='type-id-284' id='type-id-285'/>
- <pointer-type-def type-id='type-id-285' size-in-bits='64' id='type-id-281'/>
- <qualified-type-def type-id='type-id-279' const='yes' id='type-id-286'/>
- <typedef-decl name='pool_config_ops_t' type-id='type-id-286' id='type-id-287'/>
- <var-decl name='libzfs_config_ops' type-id='type-id-287' mangled-name='libzfs_config_ops' visibility='default' elf-symbol-id='libzfs_config_ops'/>
- <function-decl name='zcmd_write_conf_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-122'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zpool_clear_label' mangled-name='zpool_clear_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear_label'>
- <parameter type-id='type-id-8' name='fd'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='pread64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-54'/>
- <return type-id='type-id-54'/>
- </function-decl>
- <function-decl name='pwrite64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-54'/>
- <return type-id='type-id-54'/>
- </function-decl>
- <enum-decl name='pool_state' id='type-id-289'>
- <underlying-type type-id='type-id-49'/>
+ <typedef-decl name='refresh_config_func_t' type-id='type-id-121' id='type-id-122'/>
+ <pointer-type-def type-id='type-id-122' size-in-bits='64' id='type-id-119'/>
+ <typedef-decl name='pool_active_func_t' type-id='type-id-123' id='type-id-124'/>
+ <pointer-type-def type-id='type-id-124' size-in-bits='64' id='type-id-120'/>
+ <qualified-type-def type-id='type-id-118' const='yes' id='type-id-125'/>
+ <typedef-decl name='pool_config_ops_t' type-id='type-id-125' id='type-id-126'/>
+ <var-decl name='libzfs_config_ops' type-id='type-id-126' mangled-name='libzfs_config_ops' visibility='default' elf-symbol-id='libzfs_config_ops'/>
+ <enum-decl name='pool_state' id='type-id-127'>
+ <underlying-type type-id='type-id-41'/>
<enumerator name='POOL_STATE_ACTIVE' value='0'/>
<enumerator name='POOL_STATE_EXPORTED' value='1'/>
<enumerator name='POOL_STATE_DESTROYED' value='2'/>
<enumerator name='POOL_STATE_SPARE' value='3'/>
<enumerator name='POOL_STATE_L2CACHE' value='4'/>
<enumerator name='POOL_STATE_UNINITIALIZED' value='5'/>
<enumerator name='POOL_STATE_UNAVAIL' value='6'/>
<enumerator name='POOL_STATE_POTENTIALLY_ACTIVE' value='7'/>
</enum-decl>
- <typedef-decl name='pool_state_t' type-id='type-id-289' id='type-id-290'/>
- <pointer-type-def type-id='type-id-290' size-in-bits='64' id='type-id-291'/>
+ <typedef-decl name='pool_state_t' type-id='type-id-127' id='type-id-128'/>
+ <pointer-type-def type-id='type-id-128' size-in-bits='64' id='type-id-129'/>
<function-decl name='zpool_in_use' mangled-name='zpool_in_use' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_in_use'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-8' name='fd'/>
- <parameter type-id='type-id-291' name='state'/>
- <parameter type-id='type-id-88' name='namestr'/>
- <parameter type-id='type-id-106' name='inuse'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zpool_read_label' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-107'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <pointer-type-def type-id='type-id-292' size-in-bits='64' id='type-id-293'/>
- <function-decl name='zpool_iter' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-293'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-2' name='fd'/>
+ <parameter type-id='type-id-129' name='state'/>
+ <parameter type-id='type-id-117' name='namestr'/>
+ <parameter type-id='type-id-85' name='inuse'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-284'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-22'/>
- <parameter type-id='type-id-106'/>
- <return type-id='type-id-8'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-292'>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-282'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-15'/>
- <return type-id='type-id-15'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_iter.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-138' name='func'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_clear_label' mangled-name='zpool_clear_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear_label'>
+ <parameter type-id='type-id-2' name='fd'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='make_dataset_handle_zc' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-122'/>
- <return type-id='type-id-82'/>
+ <function-decl name='zpool_read_label' mangled-name='zpool_read_label' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-16' name='simple'/>
- <parameter type-id='type-id-138' name='func'/>
- <parameter type-id='type-id-7' name='data'/>
- <parameter type-id='type-id-22' name='min_txg'/>
- <parameter type-id='type-id-22' name='max_txg'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='make_dataset_simple_handle_zc' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-122'/>
- <return type-id='type-id-82'/>
+ <function-decl name='zpool_iter' mangled-name='zpool_iter' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-138' name='func'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <function-decl name='__pread64_alias' mangled-name='pread64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_type' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-97'/>
- <return type-id='type-id-76'/>
+ <function-decl name='pwrite64' mangled-name='pwrite64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__fxstat64' mangled-name='__fxstat64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <return type-id='type-id-104'/>
+ <function-decl name='zcmd_write_conf_nvlist' mangled-name='zcmd_write_conf_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='make_bookmark_handle' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
+ <function-type size-in-bits='64' id='type-id-123'>
+ <parameter type-id='type-id-13'/>
<parameter type-id='type-id-84'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-82'/>
+ <parameter type-id='type-id-7'/>
+ <parameter type-id='type-id-85'/>
+ <return type-id='type-id-2'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-121'>
+ <parameter type-id='type-id-13'/>
+ <parameter type-id='type-id-19'/>
+ <return type-id='type-id-19'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_iter.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='zfs_iter_mounted' mangled-name='zfs_iter_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_mounted'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-79' name='func'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-138' name='callback'/>
- <parameter type-id='type-id-7' name='data'/>
- <parameter type-id='type-id-22' name='min_txg'/>
- <parameter type-id='type-id-22' name='max_txg'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='avl_first' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-220'/>
- <return type-id='type-id-7'/>
+ <function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-9' name='allowrecursion'/>
+ <parameter type-id='type-id-79' name='func'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='avl_walk' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-220'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-7'/>
+ <function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-79' name='func'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
<function-decl name='zfs_iter_snapspec' mangled-name='zfs_iter_snapspec' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapspec'>
- <parameter type-id='type-id-135' name='fs_zhp'/>
+ <parameter type-id='type-id-76' name='fs_zhp'/>
<parameter type-id='type-id-84' name='spec_orig'/>
- <parameter type-id='type-id-138' name='func'/>
- <parameter type-id='type-id-7' name='arg'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-79' name='func'/>
+ <parameter type-id='type-id-13' name='arg'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_dataset_exists' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-50'/>
+ <function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-79' name='callback'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <parameter type-id='type-id-7' name='min_txg'/>
+ <parameter type-id='type-id-7' name='max_txg'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-79' name='func'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-9' name='simple'/>
+ <parameter type-id='type-id-79' name='func'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <parameter type-id='type-id-7' name='min_txg'/>
+ <parameter type-id='type-id-7' name='max_txg'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-138' name='func'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-79' name='func'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-16' name='allowrecursion'/>
- <parameter type-id='type-id-138' name='func'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_get_clones_nvl' mangled-name='zfs_get_clones_nvl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_clones_nvl' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-104'/>
+ <function-decl name='zfs_dataset_exists' mangled-name='zfs_dataset_exists' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_iter_mounted' mangled-name='zfs_iter_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_mounted'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-138' name='func'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_mount.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='is_mounted' mangled-name='is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mounted'>
- <parameter type-id='type-id-10' name='zfs_hdl'/>
- <parameter type-id='type-id-84' name='special'/>
- <parameter type-id='type-id-88' name='where'/>
- <return type-id='type-id-16'/>
+ <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_mnttab_find' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-222'/>
- <return type-id='type-id-8'/>
+ <function-decl name='make_bookmark_handle' mangled-name='make_bookmark_handle' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_is_mounted' mangled-name='zfs_is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_mounted'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-88' name='where'/>
- <return type-id='type-id-16'/>
+ <function-decl name='fnvpair_value_nvlist' mangled-name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_mount' mangled-name='zfs_mount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='options'/>
- <parameter type-id='type-id-8' name='flags'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_get_type' mangled-name='zfs_get_type' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_mount_at' mangled-name='zfs_mount_at' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_at'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='options'/>
- <parameter type-id='type-id-8' name='flags'/>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <return type-id='type-id-8'/>
+ <function-decl name='make_dataset_simple_handle_zc' mangled-name='make_dataset_simple_handle_zc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='getprop_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-85'/>
- <parameter type-id='type-id-88'/>
- <return type-id='type-id-35'/>
- </function-decl>
- <pointer-type-def type-id='type-id-50' size-in-bits='64' id='type-id-294'/>
- <function-decl name='zfs_crypto_get_encryption_root' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-294'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_crypto_load_key' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='mkdirp' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-8'/>
+ <function-decl name='make_dataset_handle_zc' mangled-name='make_dataset_handle_zc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='statfs64' size-in-bits='960' is-struct='yes' visibility='default' id='type-id-295'>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_mount.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <class-decl name='__anonymous_struct__' size-in-bits='192' is-struct='yes' is-anonymous='yes' naming-typedef-id='type-id-130' visibility='default' id='type-id-131'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='f_type' type-id='type-id-296' visibility='default'/>
+ <var-decl name='p_prop' type-id='type-id-110' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='f_bsize' type-id='type-id-296' visibility='default'/>
+ <var-decl name='p_name' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='f_blocks' type-id='type-id-297' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='f_bfree' type-id='type-id-297' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='f_bavail' type-id='type-id-297' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='f_files' type-id='type-id-298' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='f_ffree' type-id='type-id-298' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='f_fsid' type-id='type-id-299' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='f_namelen' type-id='type-id-296' visibility='default'/>
+ <var-decl name='p_share_err' type-id='type-id-2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='f_frsize' type-id='type-id-296' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='f_flags' type-id='type-id-296' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='f_spare' type-id='type-id-300' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__fsword_t' type-id='type-id-54' id='type-id-296'/>
- <typedef-decl name='__fsblkcnt64_t' type-id='type-id-35' id='type-id-297'/>
- <typedef-decl name='__fsfilcnt64_t' type-id='type-id-35' id='type-id-298'/>
- <class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' naming-typedef-id='type-id-299' visibility='default' id='type-id-301'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__val' type-id='type-id-302' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='p_unshare_err' type-id='type-id-2' visibility='default'/>
</data-member>
</class-decl>
+ <typedef-decl name='proto_table_t' type-id='type-id-131' id='type-id-130'/>
- <array-type-def dimensions='1' type-id='type-id-8' size-in-bits='64' id='type-id-302'>
- <subrange length='2' type-id='type-id-33' id='type-id-67'/>
+ <array-type-def dimensions='1' type-id='type-id-130' size-in-bits='384' id='type-id-132'>
+ <subrange length='2' type-id='type-id-24' id='type-id-59'/>
</array-type-def>
- <typedef-decl name='__fsid_t' type-id='type-id-301' id='type-id-299'/>
-
- <array-type-def dimensions='1' type-id='type-id-296' size-in-bits='256' id='type-id-300'>
- <subrange length='4' type-id='type-id-33' id='type-id-217'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-295' size-in-bits='64' id='type-id-303'/>
- <function-decl name='statfs64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-303'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='openat' mangled-name='openat64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-8'/>
+ <var-decl name='proto_table' type-id='type-id-132' visibility='default'/>
+ <function-decl name='zpool_disable_datasets' mangled-name='zpool_disable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_datasets'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-9' name='force'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <class-decl name='__dirstream' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-304'/>
- <pointer-type-def type-id='type-id-304' size-in-bits='64' id='type-id-305'/>
- <function-decl name='fdopendir' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-305'/>
+ <function-decl name='zpool_enable_datasets' mangled-name='zpool_enable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_enable_datasets'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='mntopts'/>
+ <parameter type-id='type-id-2' name='flags'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <class-decl name='dirent64' size-in-bits='2240' is-struct='yes' visibility='default' id='type-id-306'>
+ <pointer-type-def type-id='type-id-76' size-in-bits='64' id='type-id-133'/>
+ <function-decl name='zfs_foreach_mountpoint' mangled-name='zfs_foreach_mountpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_foreach_mountpoint'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-133' name='handles'/>
+ <parameter type-id='type-id-18' name='num_handles'/>
+ <parameter type-id='type-id-79' name='func'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <parameter type-id='type-id-9' name='parallel'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <class-decl name='get_all_cb' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-134'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='d_ino' type-id='type-id-307' visibility='default'/>
+ <var-decl name='cb_handles' type-id='type-id-133' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='d_off' type-id='type-id-155' visibility='default'/>
+ <var-decl name='cb_alloc' type-id='type-id-18' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='d_reclen' type-id='type-id-152' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='144'>
- <var-decl name='d_type' type-id='type-id-75' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='152'>
- <var-decl name='d_name' type-id='type-id-12' visibility='default'/>
+ <var-decl name='cb_used' type-id='type-id-18' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__ino64_t' type-id='type-id-35' id='type-id-307'/>
- <pointer-type-def type-id='type-id-306' size-in-bits='64' id='type-id-308'/>
- <function-decl name='readdir64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-305'/>
- <return type-id='type-id-308'/>
- </function-decl>
- <function-decl name='closedir' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-305'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='do_mount' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='libzfs_mnttab_remove' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='libzfs_mnttab_add' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='zfs_spa_version' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-8'/>
+ <typedef-decl name='get_all_cb_t' type-id='type-id-134' id='type-id-135'/>
+ <pointer-type-def type-id='type-id-135' size-in-bits='64' id='type-id-136'/>
+ <function-decl name='libzfs_add_handle' mangled-name='libzfs_add_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_add_handle'>
+ <parameter type-id='type-id-136' name='cbp'/>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_unmount' mangled-name='zfs_unmount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmount'>
- <parameter type-id='type-id-135' name='zhp'/>
+ <function-decl name='zfs_unshareall_bytype' mangled-name='zfs_unshareall_bytype' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_bytype'>
+ <parameter type-id='type-id-76' name='zhp'/>
<parameter type-id='type-id-84' name='mountpoint'/>
- <parameter type-id='type-id-8' name='flags'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='sa_commit_shares' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='do_unmount' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_crypto_unload_key' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='sa_is_shared' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-50'/>
+ <parameter type-id='type-id-84' name='proto'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='sa_disable_share' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_unshareall_bypath' mangled-name='zfs_unshareall_bypath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_bypath'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='mountpoint'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='sa_errorstr' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-17'/>
+ <function-decl name='zfs_unshareall' mangled-name='zfs_unshareall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_shareall' mangled-name='zfs_shareall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_shareall'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_unshareall_smb' mangled-name='zfs_unshareall_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_smb'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_unmountall' mangled-name='zfs_unmountall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmountall'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-8' name='flags'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_unshareall_nfs' mangled-name='zfs_unshareall_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_nfs'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_is_shared' mangled-name='zfs_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zfs_unshare_smb' mangled-name='zfs_unshare_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_smb'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='mountpoint'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='sa_enable_share' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_unshare_nfs' mangled-name='zfs_unshare_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_nfs'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='mountpoint'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_share' mangled-name='zfs_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_share_smb' mangled-name='zfs_share_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_smb'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_unshare' mangled-name='zfs_unshare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_share_nfs' mangled-name='zfs_share_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_nfs'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='changelist_unshare' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-231'/>
- <parameter type-id='type-id-294'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_commit_shares' mangled-name='zfs_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_shares'>
+ <parameter type-id='type-id-84' name='proto'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_unshareall' mangled-name='zfs_unshareall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_is_shared_smb' mangled-name='zfs_is_shared_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared_smb'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-117' name='where'/>
+ <return type-id='type-id-9'/>
</function-decl>
<function-decl name='zfs_is_shared_nfs' mangled-name='zfs_is_shared_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared_nfs'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-88' name='where'/>
- <return type-id='type-id-16'/>
- </function-decl>
- <function-decl name='zfs_is_shared_smb' mangled-name='zfs_is_shared_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared_smb'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-88' name='where'/>
- <return type-id='type-id-16'/>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-117' name='where'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='sa_validate_shareopts' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_unshare' mangled-name='zfs_unshare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_commit_shares' mangled-name='zfs_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_shares'>
- <parameter type-id='type-id-84' name='proto'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_share' mangled-name='zfs_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_share_nfs' mangled-name='zfs_share_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_nfs'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_is_shared' mangled-name='zfs_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_share_smb' mangled-name='zfs_share_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_smb'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_unmountall' mangled-name='zfs_unmountall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmountall'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-2' name='flags'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_unshare_nfs' mangled-name='zfs_unshare_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_nfs'>
- <parameter type-id='type-id-135' name='zhp'/>
+ <function-decl name='zfs_unmount' mangled-name='zfs_unmount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmount'>
+ <parameter type-id='type-id-76' name='zhp'/>
<parameter type-id='type-id-84' name='mountpoint'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-2' name='flags'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_unshare_smb' mangled-name='zfs_unshare_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_smb'>
- <parameter type-id='type-id-135' name='zhp'/>
+ <function-decl name='zfs_mount_at' mangled-name='zfs_mount_at' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_at'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='options'/>
+ <parameter type-id='type-id-2' name='flags'/>
<parameter type-id='type-id-84' name='mountpoint'/>
- <return type-id='type-id-8'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_unshareall_nfs' mangled-name='zfs_unshareall_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_nfs'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_mount' mangled-name='zfs_mount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='options'/>
+ <parameter type-id='type-id-2' name='flags'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_unshareall_smb' mangled-name='zfs_unshareall_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_smb'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_is_mounted' mangled-name='zfs_is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_mounted'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-117' name='where'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_unshareall_bypath' mangled-name='zfs_unshareall_bypath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_bypath'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <return type-id='type-id-8'/>
+ <function-decl name='is_mounted' mangled-name='is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mounted'>
+ <parameter type-id='type-id-16' name='zfs_hdl'/>
+ <parameter type-id='type-id-84' name='special'/>
+ <parameter type-id='type-id-117' name='where'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_unshareall_bytype' mangled-name='zfs_unshareall_bytype' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_bytype'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <parameter type-id='type-id-84' name='proto'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_realloc' mangled-name='zfs_realloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='rmdir' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='qsort' mangled-name='qsort' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='get_all_cb' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-309'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='cb_handles' type-id='type-id-310' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='cb_alloc' type-id='type-id-28' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='cb_used' type-id='type-id-28' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-135' size-in-bits='64' id='type-id-310'/>
- <typedef-decl name='get_all_cb_t' type-id='type-id-309' id='type-id-311'/>
- <pointer-type-def type-id='type-id-311' size-in-bits='64' id='type-id-312'/>
- <function-decl name='libzfs_add_handle' mangled-name='libzfs_add_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_add_handle'>
- <parameter type-id='type-id-312' name='cbp'/>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-6'/>
+ <function-decl name='tpool_dispatch' mangled-name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_realloc' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-7'/>
+ <function-decl name='tpool_create' mangled-name='tpool_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_foreach_mountpoint' mangled-name='zfs_foreach_mountpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_foreach_mountpoint'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-310' name='handles'/>
- <parameter type-id='type-id-28' name='num_handles'/>
- <parameter type-id='type-id-138' name='func'/>
- <parameter type-id='type-id-7' name='data'/>
- <parameter type-id='type-id-16' name='parallel'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='qsort' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-62'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <class-decl name='tpool' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-313'/>
- <pointer-type-def type-id='type-id-313' size-in-bits='64' id='type-id-314'/>
- <pointer-type-def type-id='type-id-272' size-in-bits='64' id='type-id-315'/>
- <function-decl name='tpool_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-5'/>
- <parameter type-id='type-id-5'/>
- <parameter type-id='type-id-5'/>
- <parameter type-id='type-id-315'/>
- <return type-id='type-id-314'/>
- </function-decl>
- <pointer-type-def type-id='type-id-316' size-in-bits='64' id='type-id-317'/>
- <function-decl name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-314'/>
- <parameter type-id='type-id-317'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-decl name='tpool_wait' mangled-name='tpool_wait' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='tpool_wait' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-314'/>
- <return type-id='type-id-6'/>
+ <function-decl name='tpool_destroy' mangled-name='tpool_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='tpool_destroy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-314'/>
- <return type-id='type-id-6'/>
+ <function-decl name='rmdir' mangled-name='rmdir' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_enable_datasets' mangled-name='zpool_enable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_enable_datasets'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='mntopts'/>
- <parameter type-id='type-id-8' name='flags'/>
- <return type-id='type-id-8'/>
+ <function-decl name='changelist_unshare' mangled-name='changelist_unshare' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_disable_datasets' mangled-name='zpool_disable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_datasets'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-16' name='force'/>
- <return type-id='type-id-8'/>
+ <function-decl name='libzfs_mnttab_find' mangled-name='libzfs_mnttab_find' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-316'>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-6'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_pool.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-318'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='ZPOOL_COMPATIBILITY_OK' value='0'/>
- <enumerator name='ZPOOL_COMPATIBILITY_WARNTOKEN' value='1'/>
- <enumerator name='ZPOOL_COMPATIBILITY_BADTOKEN' value='2'/>
- <enumerator name='ZPOOL_COMPATIBILITY_BADFILE' value='3'/>
- <enumerator name='ZPOOL_COMPATIBILITY_NOFILES' value='4'/>
- </enum-decl>
- <typedef-decl name='zpool_compat_status_t' type-id='type-id-318' id='type-id-319'/>
- <function-decl name='zpool_load_compat' mangled-name='zpool_load_compat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_load_compat'>
- <parameter type-id='type-id-84' name='compat'/>
- <parameter type-id='type-id-106' name='features'/>
- <parameter type-id='type-id-17' name='report'/>
- <parameter type-id='type-id-28' name='rlen'/>
- <return type-id='type-id-319'/>
+ <function-decl name='sa_commit_shares' mangled-name='sa_commit_shares' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_props_refresh' mangled-name='zpool_props_refresh' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_props_refresh'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='sa_validate_shareopts' mangled-name='sa_validate_shareopts' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <typedef-decl name='zpool_prop_t' type-id='type-id-142' id='type-id-320'/>
- <function-decl name='zpool_get_prop_int' mangled-name='zpool_get_prop_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop_int'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-320' name='prop'/>
- <parameter type-id='type-id-233' name='src'/>
- <return type-id='type-id-22'/>
+ <function-decl name='sa_enable_share' mangled-name='sa_enable_share' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_prop_to_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-142'/>
- <return type-id='type-id-84'/>
+ <function-decl name='sa_errorstr' mangled-name='sa_errorstr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_prop_default_numeric' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-142'/>
- <return type-id='type-id-35'/>
+ <function-decl name='sa_disable_share' mangled-name='sa_disable_share' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <enum-decl name='vdev_state' id='type-id-321'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='VDEV_STATE_UNKNOWN' value='0'/>
- <enumerator name='VDEV_STATE_CLOSED' value='1'/>
- <enumerator name='VDEV_STATE_OFFLINE' value='2'/>
- <enumerator name='VDEV_STATE_REMOVED' value='3'/>
- <enumerator name='VDEV_STATE_CANT_OPEN' value='4'/>
- <enumerator name='VDEV_STATE_FAULTED' value='5'/>
- <enumerator name='VDEV_STATE_DEGRADED' value='6'/>
- <enumerator name='VDEV_STATE_HEALTHY' value='7'/>
- </enum-decl>
- <typedef-decl name='vdev_state_t' type-id='type-id-321' id='type-id-322'/>
- <enum-decl name='vdev_aux' id='type-id-323'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='VDEV_AUX_NONE' value='0'/>
- <enumerator name='VDEV_AUX_OPEN_FAILED' value='1'/>
- <enumerator name='VDEV_AUX_CORRUPT_DATA' value='2'/>
- <enumerator name='VDEV_AUX_NO_REPLICAS' value='3'/>
- <enumerator name='VDEV_AUX_BAD_GUID_SUM' value='4'/>
- <enumerator name='VDEV_AUX_TOO_SMALL' value='5'/>
- <enumerator name='VDEV_AUX_BAD_LABEL' value='6'/>
- <enumerator name='VDEV_AUX_VERSION_NEWER' value='7'/>
- <enumerator name='VDEV_AUX_VERSION_OLDER' value='8'/>
- <enumerator name='VDEV_AUX_UNSUP_FEAT' value='9'/>
- <enumerator name='VDEV_AUX_SPARED' value='10'/>
- <enumerator name='VDEV_AUX_ERR_EXCEEDED' value='11'/>
- <enumerator name='VDEV_AUX_IO_FAILURE' value='12'/>
- <enumerator name='VDEV_AUX_BAD_LOG' value='13'/>
- <enumerator name='VDEV_AUX_EXTERNAL' value='14'/>
- <enumerator name='VDEV_AUX_SPLIT_POOL' value='15'/>
- <enumerator name='VDEV_AUX_BAD_ASHIFT' value='16'/>
- <enumerator name='VDEV_AUX_EXTERNAL_PERSIST' value='17'/>
- <enumerator name='VDEV_AUX_ACTIVE' value='18'/>
- <enumerator name='VDEV_AUX_CHILDREN_OFFLINE' value='19'/>
- <enumerator name='VDEV_AUX_ASHIFT_TOO_BIG' value='20'/>
- </enum-decl>
- <typedef-decl name='vdev_aux_t' type-id='type-id-323' id='type-id-324'/>
- <function-decl name='zpool_state_to_name' mangled-name='zpool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_state_to_name'>
- <parameter type-id='type-id-322' name='state'/>
- <parameter type-id='type-id-324' name='aux'/>
- <return type-id='type-id-84'/>
+ <function-decl name='libzfs_mnttab_remove' mangled-name='libzfs_mnttab_remove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_pool_state_to_name' mangled-name='zpool_pool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_pool_state_to_name'>
- <parameter type-id='type-id-290' name='state'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_crypto_get_encryption_root' mangled-name='zfs_crypto_get_encryption_root' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_get_state_str' mangled-name='zpool_get_state_str' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state_str'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_crypto_unload_key' mangled-name='zfs_crypto_unload_key' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-325'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='ZPOOL_STATUS_CORRUPT_CACHE' value='0'/>
- <enumerator name='ZPOOL_STATUS_MISSING_DEV_R' value='1'/>
- <enumerator name='ZPOOL_STATUS_MISSING_DEV_NR' value='2'/>
- <enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_R' value='3'/>
- <enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_NR' value='4'/>
- <enumerator name='ZPOOL_STATUS_BAD_GUID_SUM' value='5'/>
- <enumerator name='ZPOOL_STATUS_CORRUPT_POOL' value='6'/>
- <enumerator name='ZPOOL_STATUS_CORRUPT_DATA' value='7'/>
- <enumerator name='ZPOOL_STATUS_FAILING_DEV' value='8'/>
- <enumerator name='ZPOOL_STATUS_VERSION_NEWER' value='9'/>
- <enumerator name='ZPOOL_STATUS_HOSTID_MISMATCH' value='10'/>
- <enumerator name='ZPOOL_STATUS_HOSTID_ACTIVE' value='11'/>
- <enumerator name='ZPOOL_STATUS_HOSTID_REQUIRED' value='12'/>
- <enumerator name='ZPOOL_STATUS_IO_FAILURE_WAIT' value='13'/>
- <enumerator name='ZPOOL_STATUS_IO_FAILURE_CONTINUE' value='14'/>
- <enumerator name='ZPOOL_STATUS_IO_FAILURE_MMP' value='15'/>
- <enumerator name='ZPOOL_STATUS_BAD_LOG' value='16'/>
- <enumerator name='ZPOOL_STATUS_ERRATA' value='17'/>
- <enumerator name='ZPOOL_STATUS_UNSUP_FEAT_READ' value='18'/>
- <enumerator name='ZPOOL_STATUS_UNSUP_FEAT_WRITE' value='19'/>
- <enumerator name='ZPOOL_STATUS_FAULTED_DEV_R' value='20'/>
- <enumerator name='ZPOOL_STATUS_FAULTED_DEV_NR' value='21'/>
- <enumerator name='ZPOOL_STATUS_VERSION_OLDER' value='22'/>
- <enumerator name='ZPOOL_STATUS_FEAT_DISABLED' value='23'/>
- <enumerator name='ZPOOL_STATUS_RESILVERING' value='24'/>
- <enumerator name='ZPOOL_STATUS_OFFLINE_DEV' value='25'/>
- <enumerator name='ZPOOL_STATUS_REMOVED_DEV' value='26'/>
- <enumerator name='ZPOOL_STATUS_REBUILDING' value='27'/>
- <enumerator name='ZPOOL_STATUS_REBUILD_SCRUB' value='28'/>
- <enumerator name='ZPOOL_STATUS_NON_NATIVE_ASHIFT' value='29'/>
- <enumerator name='ZPOOL_STATUS_COMPATIBILITY_ERR' value='30'/>
- <enumerator name='ZPOOL_STATUS_INCOMPATIBLE_FEAT' value='31'/>
- <enumerator name='ZPOOL_STATUS_OK' value='32'/>
- </enum-decl>
- <enum-decl name='zpool_errata' id='type-id-326'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='ZPOOL_ERRATA_NONE' value='0'/>
- <enumerator name='ZPOOL_ERRATA_ZOL_2094_SCRUB' value='1'/>
- <enumerator name='ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY' value='2'/>
- <enumerator name='ZPOOL_ERRATA_ZOL_6845_ENCRYPTION' value='3'/>
- <enumerator name='ZPOOL_ERRATA_ZOL_8308_ENCRYPTION' value='4'/>
- </enum-decl>
- <pointer-type-def type-id='type-id-326' size-in-bits='64' id='type-id-327'/>
- <function-decl name='zpool_get_status' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-88'/>
- <parameter type-id='type-id-327'/>
- <return type-id='type-id-325'/>
+ <function-decl name='do_unmount' mangled-name='do_unmount' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_get_state' mangled-name='zpool_get_state' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_spa_version' mangled-name='zfs_spa_version' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_get_prop' mangled-name='zpool_get_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-320' name='prop'/>
- <parameter type-id='type-id-17' name='buf'/>
- <parameter type-id='type-id-28' name='len'/>
- <parameter type-id='type-id-233' name='srctype'/>
- <parameter type-id='type-id-16' name='literal'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zpool_prop_get_type' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-142'/>
- <return type-id='type-id-234'/>
- </function-decl>
- <function-decl name='zpool_prop_index_to_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-142'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-241'/>
- <return type-id='type-id-8'/>
+ <function-decl name='__lxstat' mangled-name='__lxstat64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__openat_alias' mangled-name='openat64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fdopendir' mangled-name='fdopendir' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_get_name' mangled-name='zpool_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_name'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-84'/>
+ <function-decl name='readdir64' mangled-name='readdir64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_prop_default_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-142'/>
- <return type-id='type-id-84'/>
+ <function-decl name='closedir' mangled-name='closedir' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_set_prop' mangled-name='zpool_set_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_prop'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-84' name='propval'/>
- <return type-id='type-id-8'/>
+ <function-decl name='__xstat' mangled-name='__xstat64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_standard_error' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='statfs64' mangled-name='statfs64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_name_to_prop' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-142'/>
+ <function-decl name='do_mount' mangled-name='do_mount' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_prop_readonly' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-142'/>
- <return type-id='type-id-50'/>
+ <function-decl name='libzfs_mnttab_add' mangled-name='libzfs_mnttab_add' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_prop_setonce' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-142'/>
- <return type-id='type-id-50'/>
+ <function-decl name='mkdirp' mangled-name='mkdirp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_prop_feature' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
+ <function-decl name='zfs_crypto_load_key' mangled-name='zfs_crypto_load_key' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <enum-decl name='spa_feature' id='type-id-328'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='SPA_FEATURE_NONE' value='-1'/>
- <enumerator name='SPA_FEATURE_ASYNC_DESTROY' value='0'/>
- <enumerator name='SPA_FEATURE_EMPTY_BPOBJ' value='1'/>
- <enumerator name='SPA_FEATURE_LZ4_COMPRESS' value='2'/>
- <enumerator name='SPA_FEATURE_MULTI_VDEV_CRASH_DUMP' value='3'/>
- <enumerator name='SPA_FEATURE_SPACEMAP_HISTOGRAM' value='4'/>
- <enumerator name='SPA_FEATURE_ENABLED_TXG' value='5'/>
- <enumerator name='SPA_FEATURE_HOLE_BIRTH' value='6'/>
- <enumerator name='SPA_FEATURE_EXTENSIBLE_DATASET' value='7'/>
- <enumerator name='SPA_FEATURE_EMBEDDED_DATA' value='8'/>
- <enumerator name='SPA_FEATURE_BOOKMARKS' value='9'/>
- <enumerator name='SPA_FEATURE_FS_SS_LIMIT' value='10'/>
- <enumerator name='SPA_FEATURE_LARGE_BLOCKS' value='11'/>
- <enumerator name='SPA_FEATURE_LARGE_DNODE' value='12'/>
- <enumerator name='SPA_FEATURE_SHA512' value='13'/>
- <enumerator name='SPA_FEATURE_SKEIN' value='14'/>
- <enumerator name='SPA_FEATURE_EDONR' value='15'/>
- <enumerator name='SPA_FEATURE_USEROBJ_ACCOUNTING' value='16'/>
- <enumerator name='SPA_FEATURE_ENCRYPTION' value='17'/>
- <enumerator name='SPA_FEATURE_PROJECT_QUOTA' value='18'/>
- <enumerator name='SPA_FEATURE_DEVICE_REMOVAL' value='19'/>
- <enumerator name='SPA_FEATURE_OBSOLETE_COUNTS' value='20'/>
- <enumerator name='SPA_FEATURE_POOL_CHECKPOINT' value='21'/>
- <enumerator name='SPA_FEATURE_SPACEMAP_V2' value='22'/>
- <enumerator name='SPA_FEATURE_ALLOCATION_CLASSES' value='23'/>
- <enumerator name='SPA_FEATURE_RESILVER_DEFER' value='24'/>
- <enumerator name='SPA_FEATURE_BOOKMARK_V2' value='25'/>
- <enumerator name='SPA_FEATURE_REDACTION_BOOKMARKS' value='26'/>
- <enumerator name='SPA_FEATURE_REDACTED_DATASETS' value='27'/>
- <enumerator name='SPA_FEATURE_BOOKMARK_WRITTEN' value='28'/>
- <enumerator name='SPA_FEATURE_LOG_SPACEMAP' value='29'/>
- <enumerator name='SPA_FEATURE_LIVELIST' value='30'/>
- <enumerator name='SPA_FEATURE_DEVICE_REBUILD' value='31'/>
- <enumerator name='SPA_FEATURE_ZSTD_COMPRESS' value='32'/>
- <enumerator name='SPA_FEATURE_DRAID' value='33'/>
- <enumerator name='SPA_FEATURES' value='34'/>
- </enum-decl>
- <pointer-type-def type-id='type-id-328' size-in-bits='64' id='type-id-329'/>
- <function-decl name='zfeature_lookup_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-329'/>
- <return type-id='type-id-8'/>
+ <function-decl name='getprop_uint64' mangled-name='getprop_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_open_canfail' mangled-name='zpool_open_canfail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open_canfail'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='pool'/>
- <return type-id='type-id-11'/>
+ <function-decl name='sa_is_shared' mangled-name='sa_is_shared' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_close'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-6'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_pool.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='zpool_get_bootenv' mangled-name='zpool_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_bootenv'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-86' name='nvlp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='get_system_hostid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-35'/>
+ <qualified-type-def type-id='type-id-33' const='yes' id='type-id-137'/>
+ <pointer-type-def type-id='type-id-137' size-in-bits='64' id='type-id-138'/>
+ <function-decl name='zpool_set_bootenv' mangled-name='zpool_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_bootenv'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-138' name='envmap'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_expand_proplist' mangled-name='zpool_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_expand_proplist'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-259' name='plp'/>
- <parameter type-id='type-id-16' name='literal'/>
- <return type-id='type-id-8'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-139'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
+ <enumerator name='ZPOOL_WAIT_FREE' value='1'/>
+ <enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
+ <enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
+ <enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
+ <enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
+ <enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
+ <enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
+ <enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
+ </enum-decl>
+ <typedef-decl name='zpool_wait_activity_t' type-id='type-id-139' id='type-id-140'/>
+ <function-decl name='zpool_wait_status' mangled-name='zpool_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait_status'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-140' name='activity'/>
+ <parameter type-id='type-id-85' name='missing'/>
+ <parameter type-id='type-id-85' name='waited'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfeature_is_supported' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
+ <function-decl name='zpool_wait' mangled-name='zpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-140' name='activity'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_prop_get_feature' mangled-name='zpool_prop_get_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_feature'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-17' name='buf'/>
- <parameter type-id='type-id-28' name='len'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_obj_to_path_ds' mangled-name='zpool_obj_to_path_ds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path_ds'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-7' name='dsobj'/>
+ <parameter type-id='type-id-7' name='obj'/>
+ <parameter type-id='type-id-14' name='pathname'/>
+ <parameter type-id='type-id-18' name='len'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='pool_namecheck' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-213'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_obj_to_path' mangled-name='zpool_obj_to_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-7' name='dsobj'/>
+ <parameter type-id='type-id-7' name='obj'/>
+ <parameter type-id='type-id-14' name='pathname'/>
+ <parameter type-id='type-id-18' name='len'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_refresh_stats' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-294'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_events_seek' mangled-name='zpool_events_seek' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_seek'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-7' name='eid'/>
+ <parameter type-id='type-id-2' name='zevent_fd'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='pool'/>
- <return type-id='type-id-11'/>
+ <function-decl name='zpool_events_clear' mangled-name='zpool_events_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_clear'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-114' name='count'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_is_draid_spare' mangled-name='zpool_is_draid_spare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_is_draid_spare'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zpool_events_next' mangled-name='zpool_events_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_next'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-86' name='nvp'/>
+ <parameter type-id='type-id-114' name='dropped'/>
+ <parameter type-id='type-id-30' name='flags'/>
+ <parameter type-id='type-id-2' name='zevent_fd'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_create' mangled-name='zpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_create'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='pool'/>
- <parameter type-id='type-id-15' name='nvroot'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-15' name='fsprops'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_get_history' mangled-name='zpool_get_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_history'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-86' name='nvhisp'/>
+ <parameter type-id='type-id-108' name='off'/>
+ <parameter type-id='type-id-85' name='eof'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_log_history' mangled-name='zpool_log_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_log_history'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='message'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_destroy' mangled-name='zpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_destroy'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='log_str'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_save_arguments' mangled-name='zfs_save_arguments' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_save_arguments'>
+ <parameter type-id='type-id-2' name='argc'/>
+ <parameter type-id='type-id-117' name='argv'/>
+ <parameter type-id='type-id-14' name='string'/>
+ <parameter type-id='type-id-2' name='len'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_checkpoint' mangled-name='zpool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_checkpoint'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_upgrade' mangled-name='zpool_upgrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_upgrade'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-7' name='new_version'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='lzc_pool_checkpoint' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_get_errlog' mangled-name='zpool_get_errlog' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_errlog'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-86' name='nverrlistp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_discard_checkpoint' mangled-name='zpool_discard_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_discard_checkpoint'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_vdev_name' mangled-name='zpool_vdev_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_name'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-19' name='nv'/>
+ <parameter type-id='type-id-2' name='name_flags'/>
+ <return type-id='type-id-14'/>
</function-decl>
- <function-decl name='lzc_pool_checkpoint_discard' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_sync_one' mangled-name='zpool_sync_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_sync_one'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_add' mangled-name='zpool_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_add'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-15' name='nvroot'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_reopen_one' mangled-name='zpool_reopen_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reopen_one'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_export' mangled-name='zpool_export' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-16' name='force'/>
- <parameter type-id='type-id-84' name='log_str'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_reguid' mangled-name='zpool_reguid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reguid'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_standard_error_fmt' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_vdev_clear' mangled-name='zpool_vdev_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_clear'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-7' name='guid'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_export_force' mangled-name='zpool_export_force' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export_force'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='log_str'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_clear' mangled-name='zpool_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-19' name='rewindnvl'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_explain_recover' mangled-name='zpool_explain_recover' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_explain_recover'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-8' name='reason'/>
- <parameter type-id='type-id-15' name='config'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zpool_vdev_indirect_size' mangled-name='zpool_vdev_indirect_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_indirect_size'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-108' name='sizep'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_import' mangled-name='zpool_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-15' name='config'/>
- <parameter type-id='type-id-84' name='newname'/>
- <parameter type-id='type-id-17' name='altroot'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_vdev_remove_cancel' mangled-name='zpool_vdev_remove_cancel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove_cancel'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_import_props' mangled-name='zpool_import_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_props'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-15' name='config'/>
- <parameter type-id='type-id-84' name='newname'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-8' name='flags'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_vdev_remove' mangled-name='zpool_vdev_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <class-decl name='zpool_load_policy' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-330'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zlp_rewind' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zlp_maxmeta' type-id='type-id-22' visibility='default'/>
+ <class-decl name='splitflags' size-in-bits='64' is-struct='yes' visibility='default' id='type-id-141'>
+ <data-member access='public' layout-offset-in-bits='31'>
+ <var-decl name='dryrun' type-id='type-id-2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zlp_maxdata' type-id='type-id-22' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='30'>
+ <var-decl name='import' type-id='type-id-2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='zlp_txg' type-id='type-id-22' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='name_flags' type-id='type-id-2' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-330' size-in-bits='64' id='type-id-331'/>
- <function-decl name='zpool_get_load_policy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-331'/>
- <return type-id='type-id-6'/>
+ <typedef-decl name='splitflags_t' type-id='type-id-141' id='type-id-142'/>
+ <function-decl name='zpool_vdev_split' mangled-name='zpool_vdev_split' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_split'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-14' name='newname'/>
+ <parameter type-id='type-id-86' name='newroot'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <parameter type-id='type-id-142' name='flags'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_print_unsup_feat' mangled-name='zpool_print_unsup_feat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_print_unsup_feat'>
- <parameter type-id='type-id-15' name='config'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zpool_vdev_detach' mangled-name='zpool_vdev_detach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_detach'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-35'/>
+ <function-decl name='zpool_vdev_attach' mangled-name='zpool_vdev_attach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_attach'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='old_disk'/>
+ <parameter type-id='type-id-84' name='new_disk'/>
+ <parameter type-id='type-id-19' name='nvroot'/>
+ <parameter type-id='type-id-2' name='replacing'/>
+ <parameter type-id='type-id-9' name='rebuild'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_vdev_name' mangled-name='zpool_vdev_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_name'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-15' name='nv'/>
- <parameter type-id='type-id-8' name='name_flags'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <enum-decl name='pool_initialize_func' id='type-id-332'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='POOL_INITIALIZE_START' value='0'/>
- <enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
- <enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
- <enumerator name='POOL_INITIALIZE_FUNCS' value='3'/>
+ <enum-decl name='vdev_aux' id='type-id-143'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='VDEV_AUX_NONE' value='0'/>
+ <enumerator name='VDEV_AUX_OPEN_FAILED' value='1'/>
+ <enumerator name='VDEV_AUX_CORRUPT_DATA' value='2'/>
+ <enumerator name='VDEV_AUX_NO_REPLICAS' value='3'/>
+ <enumerator name='VDEV_AUX_BAD_GUID_SUM' value='4'/>
+ <enumerator name='VDEV_AUX_TOO_SMALL' value='5'/>
+ <enumerator name='VDEV_AUX_BAD_LABEL' value='6'/>
+ <enumerator name='VDEV_AUX_VERSION_NEWER' value='7'/>
+ <enumerator name='VDEV_AUX_VERSION_OLDER' value='8'/>
+ <enumerator name='VDEV_AUX_UNSUP_FEAT' value='9'/>
+ <enumerator name='VDEV_AUX_SPARED' value='10'/>
+ <enumerator name='VDEV_AUX_ERR_EXCEEDED' value='11'/>
+ <enumerator name='VDEV_AUX_IO_FAILURE' value='12'/>
+ <enumerator name='VDEV_AUX_BAD_LOG' value='13'/>
+ <enumerator name='VDEV_AUX_EXTERNAL' value='14'/>
+ <enumerator name='VDEV_AUX_SPLIT_POOL' value='15'/>
+ <enumerator name='VDEV_AUX_BAD_ASHIFT' value='16'/>
+ <enumerator name='VDEV_AUX_EXTERNAL_PERSIST' value='17'/>
+ <enumerator name='VDEV_AUX_ACTIVE' value='18'/>
+ <enumerator name='VDEV_AUX_CHILDREN_OFFLINE' value='19'/>
+ <enumerator name='VDEV_AUX_ASHIFT_TOO_BIG' value='20'/>
</enum-decl>
- <typedef-decl name='pool_initialize_func_t' type-id='type-id-332' id='type-id-333'/>
- <function-decl name='zpool_initialize' mangled-name='zpool_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-333' name='cmd_type'/>
- <parameter type-id='type-id-15' name='vds'/>
- <return type-id='type-id-8'/>
+ <typedef-decl name='vdev_aux_t' type-id='type-id-143' id='type-id-144'/>
+ <function-decl name='zpool_vdev_degrade' mangled-name='zpool_vdev_degrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_degrade'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-7' name='guid'/>
+ <parameter type-id='type-id-144' name='aux'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='lzc_initialize' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-332'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_vdev_fault' mangled-name='zpool_vdev_fault' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_fault'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-7' name='guid'/>
+ <parameter type-id='type-id-144' name='aux'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <return type-id='type-id-54'/>
+ <function-decl name='zpool_vdev_offline' mangled-name='zpool_vdev_offline' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_offline'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-9' name='istmp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-334'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
- <enumerator name='ZPOOL_WAIT_FREE' value='1'/>
- <enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
- <enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
- <enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
- <enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
- <enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
- <enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
- <enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
+ <enum-decl name='vdev_state' id='type-id-145'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='VDEV_STATE_UNKNOWN' value='0'/>
+ <enumerator name='VDEV_STATE_CLOSED' value='1'/>
+ <enumerator name='VDEV_STATE_OFFLINE' value='2'/>
+ <enumerator name='VDEV_STATE_REMOVED' value='3'/>
+ <enumerator name='VDEV_STATE_CANT_OPEN' value='4'/>
+ <enumerator name='VDEV_STATE_FAULTED' value='5'/>
+ <enumerator name='VDEV_STATE_DEGRADED' value='6'/>
+ <enumerator name='VDEV_STATE_HEALTHY' value='7'/>
</enum-decl>
- <function-decl name='lzc_wait_tag' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-334'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-294'/>
- <return type-id='type-id-8'/>
+ <typedef-decl name='vdev_state_t' type-id='type-id-145' id='type-id-146'/>
+ <pointer-type-def type-id='type-id-146' size-in-bits='64' id='type-id-147'/>
+ <function-decl name='zpool_vdev_online' mangled-name='zpool_vdev_online' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_online'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-2' name='flags'/>
+ <parameter type-id='type-id-147' name='newstate'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_initialize_wait' mangled-name='zpool_initialize_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize_wait'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-333' name='cmd_type'/>
- <parameter type-id='type-id-15' name='vds'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_vdev_path_to_guid' mangled-name='zpool_vdev_path_to_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_path_to_guid'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <return type-id='type-id-7'/>
+ </function-decl>
+ <function-decl name='zpool_get_physpath' mangled-name='zpool_get_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_physpath'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-14' name='physpath'/>
+ <parameter type-id='type-id-18' name='phypath_size'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zpool_find_vdev' mangled-name='zpool_find_vdev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-85' name='avail_spare'/>
+ <parameter type-id='type-id-85' name='l2cache'/>
+ <parameter type-id='type-id-85' name='log'/>
+ <return type-id='type-id-19'/>
</function-decl>
- <enum-decl name='pool_trim_func' id='type-id-335'>
- <underlying-type type-id='type-id-49'/>
+ <function-decl name='zpool_find_vdev_by_physpath' mangled-name='zpool_find_vdev_by_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev_by_physpath'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='ppath'/>
+ <parameter type-id='type-id-85' name='avail_spare'/>
+ <parameter type-id='type-id-85' name='l2cache'/>
+ <parameter type-id='type-id-85' name='log'/>
+ <return type-id='type-id-19'/>
+ </function-decl>
+ <enum-decl name='pool_scan_func' id='type-id-148'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='POOL_SCAN_NONE' value='0'/>
+ <enumerator name='POOL_SCAN_SCRUB' value='1'/>
+ <enumerator name='POOL_SCAN_RESILVER' value='2'/>
+ <enumerator name='POOL_SCAN_FUNCS' value='3'/>
+ </enum-decl>
+ <typedef-decl name='pool_scan_func_t' type-id='type-id-148' id='type-id-149'/>
+ <enum-decl name='pool_scrub_cmd' id='type-id-150'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='POOL_SCRUB_NORMAL' value='0'/>
+ <enumerator name='POOL_SCRUB_PAUSE' value='1'/>
+ <enumerator name='POOL_SCRUB_FLAGS_END' value='2'/>
+ </enum-decl>
+ <typedef-decl name='pool_scrub_cmd_t' type-id='type-id-150' id='type-id-151'/>
+ <function-decl name='zpool_scan' mangled-name='zpool_scan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_scan'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-149' name='func'/>
+ <parameter type-id='type-id-151' name='cmd'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <enum-decl name='pool_trim_func' id='type-id-152'>
+ <underlying-type type-id='type-id-41'/>
<enumerator name='POOL_TRIM_START' value='0'/>
<enumerator name='POOL_TRIM_CANCEL' value='1'/>
<enumerator name='POOL_TRIM_SUSPEND' value='2'/>
<enumerator name='POOL_TRIM_FUNCS' value='3'/>
</enum-decl>
- <typedef-decl name='pool_trim_func_t' type-id='type-id-335' id='type-id-336'/>
- <class-decl name='trimflags' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-337'>
+ <typedef-decl name='pool_trim_func_t' type-id='type-id-152' id='type-id-153'/>
+ <class-decl name='trimflags' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-154'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='fullpool' type-id='type-id-16' visibility='default'/>
+ <var-decl name='fullpool' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='secure' type-id='type-id-16' visibility='default'/>
+ <var-decl name='secure' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='wait' type-id='type-id-16' visibility='default'/>
+ <var-decl name='wait' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='rate' type-id='type-id-22' visibility='default'/>
+ <var-decl name='rate' type-id='type-id-7' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='trimflags_t' type-id='type-id-337' id='type-id-338'/>
- <pointer-type-def type-id='type-id-338' size-in-bits='64' id='type-id-339'/>
+ <typedef-decl name='trimflags_t' type-id='type-id-154' id='type-id-155'/>
+ <pointer-type-def type-id='type-id-155' size-in-bits='64' id='type-id-156'/>
<function-decl name='zpool_trim' mangled-name='zpool_trim' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_trim'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-336' name='cmd_type'/>
- <parameter type-id='type-id-15' name='vds'/>
- <parameter type-id='type-id-339' name='trim_flags'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-153' name='cmd_type'/>
+ <parameter type-id='type-id-19' name='vds'/>
+ <parameter type-id='type-id-156' name='trim_flags'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <enum-decl name='pool_initialize_func' id='type-id-157'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='POOL_INITIALIZE_START' value='0'/>
+ <enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
+ <enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
+ <enumerator name='POOL_INITIALIZE_FUNCS' value='3'/>
+ </enum-decl>
+ <typedef-decl name='pool_initialize_func_t' type-id='type-id-157' id='type-id-158'/>
+ <function-decl name='zpool_initialize_wait' mangled-name='zpool_initialize_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize_wait'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-158' name='cmd_type'/>
+ <parameter type-id='type-id-19' name='vds'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zpool_initialize' mangled-name='zpool_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-158' name='cmd_type'/>
+ <parameter type-id='type-id-19' name='vds'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zpool_import_props' mangled-name='zpool_import_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_props'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-19' name='config'/>
+ <parameter type-id='type-id-84' name='newname'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <parameter type-id='type-id-2' name='flags'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zpool_print_unsup_feat' mangled-name='zpool_print_unsup_feat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_print_unsup_feat'>
+ <parameter type-id='type-id-19' name='config'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_trim' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-335'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fnvlist_add_int64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-54'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zpool_import' mangled-name='zpool_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-19' name='config'/>
+ <parameter type-id='type-id-84' name='newname'/>
+ <parameter type-id='type-id-14' name='altroot'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_find_vdev' mangled-name='zpool_find_vdev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-106' name='avail_spare'/>
- <parameter type-id='type-id-106' name='l2cache'/>
- <parameter type-id='type-id-106' name='log'/>
- <return type-id='type-id-15'/>
+ <function-decl name='zpool_explain_recover' mangled-name='zpool_explain_recover' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_explain_recover'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='name'/>
+ <parameter type-id='type-id-2' name='reason'/>
+ <parameter type-id='type-id-19' name='config'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <enum-decl name='pool_scan_func' id='type-id-340'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='POOL_SCAN_NONE' value='0'/>
- <enumerator name='POOL_SCAN_SCRUB' value='1'/>
- <enumerator name='POOL_SCAN_RESILVER' value='2'/>
- <enumerator name='POOL_SCAN_FUNCS' value='3'/>
- </enum-decl>
- <typedef-decl name='pool_scan_func_t' type-id='type-id-340' id='type-id-341'/>
- <enum-decl name='pool_scrub_cmd' id='type-id-342'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='POOL_SCRUB_NORMAL' value='0'/>
- <enumerator name='POOL_SCRUB_PAUSE' value='1'/>
- <enumerator name='POOL_SCRUB_FLAGS_END' value='2'/>
- </enum-decl>
- <typedef-decl name='pool_scrub_cmd_t' type-id='type-id-342' id='type-id-343'/>
- <function-decl name='zpool_scan' mangled-name='zpool_scan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_scan'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-341' name='func'/>
- <parameter type-id='type-id-343' name='cmd'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_export_force' mangled-name='zpool_export_force' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export_force'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='log_str'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_find_vdev_by_physpath' mangled-name='zpool_find_vdev_by_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev_by_physpath'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='ppath'/>
- <parameter type-id='type-id-106' name='avail_spare'/>
- <parameter type-id='type-id-106' name='l2cache'/>
- <parameter type-id='type-id-106' name='log'/>
- <return type-id='type-id-15'/>
+ <function-decl name='zpool_export' mangled-name='zpool_export' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-9' name='force'/>
+ <parameter type-id='type-id-84' name='log_str'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_strcmp_pathname' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_add' mangled-name='zpool_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_add'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-19' name='nvroot'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_get_physpath' mangled-name='zpool_get_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_physpath'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-17' name='physpath'/>
- <parameter type-id='type-id-28' name='phypath_size'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_discard_checkpoint' mangled-name='zpool_discard_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_discard_checkpoint'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_vdev_path_to_guid' mangled-name='zpool_vdev_path_to_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_path_to_guid'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-22'/>
+ <function-decl name='zpool_checkpoint' mangled-name='zpool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_checkpoint'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <pointer-type-def type-id='type-id-322' size-in-bits='64' id='type-id-344'/>
- <function-decl name='zpool_vdev_online' mangled-name='zpool_vdev_online' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_online'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-8' name='flags'/>
- <parameter type-id='type-id-344' name='newstate'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_destroy' mangled-name='zpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_destroy'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='log_str'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_create' mangled-name='zpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_create'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='pool'/>
+ <parameter type-id='type-id-19' name='nvroot'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <parameter type-id='type-id-19' name='fsprops'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_relabel_disk' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_is_draid_spare' mangled-name='zpool_is_draid_spare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_is_draid_spare'>
+ <parameter type-id='type-id-84' name='name'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zpool_vdev_offline' mangled-name='zpool_vdev_offline' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_offline'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-16' name='istmp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_get_state' mangled-name='zpool_get_state' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_vdev_fault' mangled-name='zpool_vdev_fault' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_fault'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-22' name='guid'/>
- <parameter type-id='type-id-324' name='aux'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_get_name' mangled-name='zpool_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_name'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zpool_vdev_degrade' mangled-name='zpool_vdev_degrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_degrade'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-22' name='guid'/>
- <parameter type-id='type-id-324' name='aux'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_close'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_vdev_attach' mangled-name='zpool_vdev_attach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_attach'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='old_disk'/>
- <parameter type-id='type-id-84' name='new_disk'/>
- <parameter type-id='type-id-15' name='nvroot'/>
- <parameter type-id='type-id-8' name='replacing'/>
- <parameter type-id='type-id-16' name='rebuild'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='pool'/>
+ <return type-id='type-id-4'/>
</function-decl>
- <function-decl name='zfeature_lookup_guid' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-329'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_open_canfail' mangled-name='zpool_open_canfail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open_canfail'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='pool'/>
+ <return type-id='type-id-4'/>
</function-decl>
- <function-decl name='realpath' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-17'/>
+ <function-decl name='zpool_prop_get_feature' mangled-name='zpool_prop_get_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_feature'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-14' name='buf'/>
+ <parameter type-id='type-id-18' name='len'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_strip_path' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-17'/>
+ <function-decl name='zpool_expand_proplist' mangled-name='zpool_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_expand_proplist'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-103' name='plp'/>
+ <parameter type-id='type-id-9' name='literal'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_strip_partition' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-17'/>
+ <function-decl name='zpool_set_prop' mangled-name='zpool_set_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_prop'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-84' name='propval'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_vdev_detach' mangled-name='zpool_vdev_detach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_detach'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-8'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-159'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='ZPOOL_PROP_INVAL' value='-1'/>
+ <enumerator name='ZPOOL_PROP_NAME' value='0'/>
+ <enumerator name='ZPOOL_PROP_SIZE' value='1'/>
+ <enumerator name='ZPOOL_PROP_CAPACITY' value='2'/>
+ <enumerator name='ZPOOL_PROP_ALTROOT' value='3'/>
+ <enumerator name='ZPOOL_PROP_HEALTH' value='4'/>
+ <enumerator name='ZPOOL_PROP_GUID' value='5'/>
+ <enumerator name='ZPOOL_PROP_VERSION' value='6'/>
+ <enumerator name='ZPOOL_PROP_BOOTFS' value='7'/>
+ <enumerator name='ZPOOL_PROP_DELEGATION' value='8'/>
+ <enumerator name='ZPOOL_PROP_AUTOREPLACE' value='9'/>
+ <enumerator name='ZPOOL_PROP_CACHEFILE' value='10'/>
+ <enumerator name='ZPOOL_PROP_FAILUREMODE' value='11'/>
+ <enumerator name='ZPOOL_PROP_LISTSNAPS' value='12'/>
+ <enumerator name='ZPOOL_PROP_AUTOEXPAND' value='13'/>
+ <enumerator name='ZPOOL_PROP_DEDUPDITTO' value='14'/>
+ <enumerator name='ZPOOL_PROP_DEDUPRATIO' value='15'/>
+ <enumerator name='ZPOOL_PROP_FREE' value='16'/>
+ <enumerator name='ZPOOL_PROP_ALLOCATED' value='17'/>
+ <enumerator name='ZPOOL_PROP_READONLY' value='18'/>
+ <enumerator name='ZPOOL_PROP_ASHIFT' value='19'/>
+ <enumerator name='ZPOOL_PROP_COMMENT' value='20'/>
+ <enumerator name='ZPOOL_PROP_EXPANDSZ' value='21'/>
+ <enumerator name='ZPOOL_PROP_FREEING' value='22'/>
+ <enumerator name='ZPOOL_PROP_FRAGMENTATION' value='23'/>
+ <enumerator name='ZPOOL_PROP_LEAKED' value='24'/>
+ <enumerator name='ZPOOL_PROP_MAXBLOCKSIZE' value='25'/>
+ <enumerator name='ZPOOL_PROP_TNAME' value='26'/>
+ <enumerator name='ZPOOL_PROP_MAXDNODESIZE' value='27'/>
+ <enumerator name='ZPOOL_PROP_MULTIHOST' value='28'/>
+ <enumerator name='ZPOOL_PROP_CHECKPOINT' value='29'/>
+ <enumerator name='ZPOOL_PROP_LOAD_GUID' value='30'/>
+ <enumerator name='ZPOOL_PROP_AUTOTRIM' value='31'/>
+ <enumerator name='ZPOOL_PROP_COMPATIBILITY' value='32'/>
+ <enumerator name='ZPOOL_NUM_PROPS' value='33'/>
+ </enum-decl>
+ <typedef-decl name='zpool_prop_t' type-id='type-id-159' id='type-id-160'/>
+ <function-decl name='zpool_get_prop' mangled-name='zpool_get_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-160' name='prop'/>
+ <parameter type-id='type-id-14' name='buf'/>
+ <parameter type-id='type-id-18' name='len'/>
+ <parameter type-id='type-id-113' name='srctype'/>
+ <parameter type-id='type-id-9' name='literal'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <class-decl name='splitflags' size-in-bits='64' is-struct='yes' visibility='default' id='type-id-345'>
- <data-member access='public' layout-offset-in-bits='31'>
- <var-decl name='dryrun' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='30'>
- <var-decl name='import' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='name_flags' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='splitflags_t' type-id='type-id-345' id='type-id-346'/>
- <function-decl name='zpool_vdev_split' mangled-name='zpool_vdev_split' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_split'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-17' name='newname'/>
- <parameter type-id='type-id-105' name='newroot'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-346' name='flags'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-107'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_pool_state_to_name' mangled-name='zpool_pool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_pool_state_to_name'>
+ <parameter type-id='type-id-128' name='state'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zpool_vdev_remove' mangled-name='zpool_vdev_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_state_to_name' mangled-name='zpool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_state_to_name'>
+ <parameter type-id='type-id-146' name='state'/>
+ <parameter type-id='type-id-144' name='aux'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zpool_vdev_remove_cancel' mangled-name='zpool_vdev_remove_cancel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove_cancel'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_get_prop_int' mangled-name='zpool_get_prop_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop_int'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-160' name='prop'/>
+ <parameter type-id='type-id-113' name='src'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='zpool_vdev_indirect_size' mangled-name='zpool_vdev_indirect_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_indirect_size'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-248' name='sizep'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_props_refresh' mangled-name='zpool_props_refresh' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_props_refresh'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_clear' mangled-name='zpool_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-15' name='rewindnvl'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_get_state_str' mangled-name='zpool_get_state_str' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state_str'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zpool_vdev_clear' mangled-name='zpool_vdev_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_clear'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-22' name='guid'/>
- <return type-id='type-id-8'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-161'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='ZPOOL_COMPATIBILITY_OK' value='0'/>
+ <enumerator name='ZPOOL_COMPATIBILITY_WARNTOKEN' value='1'/>
+ <enumerator name='ZPOOL_COMPATIBILITY_BADTOKEN' value='2'/>
+ <enumerator name='ZPOOL_COMPATIBILITY_BADFILE' value='3'/>
+ <enumerator name='ZPOOL_COMPATIBILITY_NOFILES' value='4'/>
+ </enum-decl>
+ <typedef-decl name='zpool_compat_status_t' type-id='type-id-161' id='type-id-162'/>
+ <function-decl name='zpool_load_compat' mangled-name='zpool_load_compat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_load_compat'>
+ <parameter type-id='type-id-84' name='compat'/>
+ <parameter type-id='type-id-85' name='features'/>
+ <parameter type-id='type-id-14' name='report'/>
+ <parameter type-id='type-id-18' name='rlen'/>
+ <return type-id='type-id-162'/>
</function-decl>
- <function-decl name='zpool_reguid' mangled-name='zpool_reguid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reguid'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_get_bootenv' mangled-name='lzc_get_bootenv' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_reopen_one' mangled-name='zpool_reopen_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reopen_one'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_standard_error_fmt' mangled-name='zpool_standard_error_fmt' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_get_handle' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <return type-id='type-id-91'/>
+ <function-decl name='lzc_set_bootenv' mangled-name='lzc_set_bootenv' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_reopen' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-50'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_wait' mangled-name='lzc_wait' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_sync_one' mangled-name='zpool_sync_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_sync_one'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_standard_error' mangled-name='zpool_standard_error' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-50'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zpool_history_unpack' mangled-name='zpool_history_unpack' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_sync' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_get_errlog' mangled-name='zpool_get_errlog' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_errlog'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-105' name='nverrlistp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_basename' mangled-name='zfs_basename' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_upgrade' mangled-name='zpool_upgrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_upgrade'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-22' name='new_version'/>
- <return type-id='type-id-8'/>
+ <function-decl name='memcmp' mangled-name='memcmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_save_arguments' mangled-name='zfs_save_arguments' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_save_arguments'>
- <parameter type-id='type-id-8' name='argc'/>
- <parameter type-id='type-id-88' name='argv'/>
- <parameter type-id='type-id-17' name='string'/>
- <parameter type-id='type-id-8' name='len'/>
- <return type-id='type-id-6'/>
+ <function-decl name='__realpath_alias' mangled-name='realpath' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_log_history' mangled-name='zpool_log_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_log_history'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='message'/>
- <return type-id='type-id-8'/>
+ <function-decl name='strncasecmp' mangled-name='strncasecmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_get_history' mangled-name='zpool_get_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_history'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-105' name='nvhisp'/>
- <parameter type-id='type-id-248' name='off'/>
- <parameter type-id='type-id-106' name='eof'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zpool_history_unpack' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-267'/>
- <parameter type-id='type-id-244'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_strip_path' mangled-name='zfs_strip_path' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_events_next' mangled-name='zpool_events_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_next'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-105' name='nvp'/>
- <parameter type-id='type-id-223' name='dropped'/>
- <parameter type-id='type-id-5' name='flags'/>
- <parameter type-id='type-id-8' name='zevent_fd'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_strip_partition' mangled-name='zfs_strip_partition' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_events_clear' mangled-name='zpool_events_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_clear'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-223' name='count'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_get_handle' mangled-name='zpool_get_handle' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_events_seek' mangled-name='zpool_events_seek' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_seek'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-22' name='eid'/>
- <parameter type-id='type-id-8' name='zevent_fd'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fnvlist_add_boolean_value' mangled-name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_obj_to_path' mangled-name='zpool_obj_to_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-22' name='dsobj'/>
- <parameter type-id='type-id-22' name='obj'/>
- <parameter type-id='type-id-17' name='pathname'/>
- <parameter type-id='type-id-28' name='len'/>
- <return type-id='type-id-6'/>
+ <function-decl name='lzc_sync' mangled-name='lzc_sync' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_obj_to_path_ds' mangled-name='zpool_obj_to_path_ds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path_ds'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-22' name='dsobj'/>
- <parameter type-id='type-id-22' name='obj'/>
- <parameter type-id='type-id-17' name='pathname'/>
- <parameter type-id='type-id-28' name='len'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <typedef-decl name='zpool_wait_activity_t' type-id='type-id-334' id='type-id-347'/>
- <function-decl name='zpool_wait' mangled-name='zpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-347' name='activity'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_reopen' mangled-name='lzc_reopen' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_wait' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-334'/>
- <parameter type-id='type-id-294'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_get_load_policy' mangled-name='zpool_get_load_policy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_wait_status' mangled-name='zpool_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait_status'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-347' name='activity'/>
- <parameter type-id='type-id-106' name='missing'/>
- <parameter type-id='type-id-106' name='waited'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <qualified-type-def type-id='type-id-42' const='yes' id='type-id-348'/>
- <pointer-type-def type-id='type-id-348' size-in-bits='64' id='type-id-349'/>
- <function-decl name='zpool_set_bootenv' mangled-name='zpool_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_bootenv'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-349' name='envmap'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fnvlist_lookup_uint64' mangled-name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <qualified-type-def type-id='type-id-36' const='yes' id='type-id-350'/>
- <pointer-type-def type-id='type-id-350' size-in-bits='64' id='type-id-351'/>
- <function-decl name='lzc_set_bootenv' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-351'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_prop_to_name' mangled-name='zpool_prop_to_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_get_bootenv' mangled-name='zpool_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_bootenv'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-105' name='nvlp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfeature_lookup_guid' mangled-name='zfeature_lookup_guid' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_get_bootenv' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_resolve_shortname' mangled-name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='strtok_r' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-88'/>
- <return type-id='type-id-17'/>
+ <function-decl name='zpool_relabel_disk' mangled-name='zpool_relabel_disk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='mmap' mangled-name='mmap64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-54'/>
- <return type-id='type-id-7'/>
+ <function-decl name='strtoull' mangled-name='strtoull' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='strchrnul' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-17'/>
+ <function-decl name='zfs_strcmp_pathname' mangled-name='zfs_strcmp_pathname' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='munmap' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_wait_tag' mangled-name='lzc_wait_tag' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_name_valid' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_trim' mangled-name='lzc_trim' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_sendrecv.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='sendflags' size-in-bits='544' is-struct='yes' visibility='default' id='type-id-352'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='verbosity' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='replicate' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='skipmissing' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='doall' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='fromorigin' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='pad' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='props' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='dryrun' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='parsable' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='progress' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='largeblock' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='embed_data' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='compress' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='416'>
- <var-decl name='raw' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='backup' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='holds' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='saved' type-id='type-id-16' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='sendflags_t' type-id='type-id-352' id='type-id-353'/>
- <pointer-type-def type-id='type-id-353' size-in-bits='64' id='type-id-354'/>
- <typedef-decl name='snapfilter_cb_t' type-id='type-id-355' id='type-id-356'/>
- <pointer-type-def type-id='type-id-356' size-in-bits='64' id='type-id-357'/>
- <function-decl name='zfs_send' mangled-name='zfs_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='fromsnap'/>
- <parameter type-id='type-id-84' name='tosnap'/>
- <parameter type-id='type-id-354' name='flags'/>
- <parameter type-id='type-id-8' name='outfd'/>
- <parameter type-id='type-id-357' name='filter_func'/>
- <parameter type-id='type-id-7' name='cb_arg'/>
- <parameter type-id='type-id-105' name='debugnvp'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fnvpair_value_int64' mangled-name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_send_progress' mangled-name='zfs_send_progress' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_progress'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-8' name='fd'/>
- <parameter type-id='type-id-248' name='bytes_written'/>
- <parameter type-id='type-id-248' name='blocks_visited'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_initialize' mangled-name='lzc_initialize' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_send_resume_token_to_nvlist' mangled-name='zfs_send_resume_token_to_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume_token_to_nvlist'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='token'/>
- <return type-id='type-id-15'/>
+ <function-decl name='zfeature_lookup_name' mangled-name='zfeature_lookup_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-358'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zc_word' type-id='type-id-359' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-22' size-in-bits='256' id='type-id-359'>
- <subrange length='4' type-id='type-id-33' id='type-id-217'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-358' size-in-bits='64' id='type-id-360'/>
- <function-decl name='fletcher_4_native_varsize' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-360'/>
- <return type-id='type-id-6'/>
+ <function-decl name='lzc_pool_checkpoint_discard' mangled-name='lzc_pool_checkpoint_discard' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='uncompress' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-165'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
+ <function-decl name='lzc_pool_checkpoint' mangled-name='lzc_pool_checkpoint' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_send_resume' mangled-name='zfs_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-354' name='flags'/>
- <parameter type-id='type-id-8' name='outfd'/>
- <parameter type-id='type-id-84' name='resume_token'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='nvlist_print' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-150'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <enum-decl name='lzc_send_flags' id='type-id-361'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='LZC_SEND_FLAG_EMBED_DATA' value='1'/>
- <enumerator name='LZC_SEND_FLAG_LARGE_BLOCK' value='2'/>
- <enumerator name='LZC_SEND_FLAG_COMPRESS' value='4'/>
- <enumerator name='LZC_SEND_FLAG_RAW' value='8'/>
- <enumerator name='LZC_SEND_FLAG_SAVED' value='16'/>
- </enum-decl>
- <function-decl name='lzc_send_resume_redacted' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-361'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='nvlist_add_uint8_array' mangled-name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_send_saved' mangled-name='zfs_send_saved' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_saved'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-354' name='flags'/>
- <parameter type-id='type-id-8' name='outfd'/>
- <parameter type-id='type-id-84' name='resume_token'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_refresh_stats' mangled-name='zpool_refresh_stats' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_pool_handle' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-97'/>
- <return type-id='type-id-126'/>
+ <function-decl name='pool_namecheck' mangled-name='pool_namecheck' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_hold_nvl' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_prop_feature' mangled-name='zpool_prop_feature' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fletcher_4_incremental_native' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfeature_is_supported' mangled-name='zfeature_is_supported' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='write' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-54'/>
+ <function-decl name='zfs_name_valid' mangled-name='zfs_name_valid' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_size' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-35'/>
+ <function-decl name='zpool_name_to_prop' mangled-name='zpool_name_to_prop' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_prop_readonly' mangled-name='zpool_prop_readonly' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_send_one' mangled-name='zfs_send_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_one'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='from'/>
- <parameter type-id='type-id-8' name='fd'/>
- <parameter type-id='type-id-354' name='flags'/>
- <parameter type-id='type-id-84' name='redactbook'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_prop_setonce' mangled-name='zpool_prop_setonce' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zpool_prop_get_type' mangled-name='zpool_prop_get_type' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zpool_prop_index_to_string' mangled-name='zpool_prop_index_to_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zpool_prop_default_numeric' mangled-name='zpool_prop_default_numeric' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_send_redacted' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-361'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_prop_default_string' mangled-name='zpool_prop_default_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_send_space_resume_redacted' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-361'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-102'/>
- <return type-id='type-id-8'/>
+ <function-decl name='strtok_r' mangled-name='strtok_r' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='sleep' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-decl name='mmap' mangled-name='mmap64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='time' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-242'/>
- <return type-id='type-id-54'/>
+ <function-decl name='munmap' mangled-name='munmap' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='localtime' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-238'/>
- <return type-id='type-id-236'/>
+ <function-decl name='zpool_get_status' mangled-name='zpool_get_status' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='recvflags' size-in-bits='416' is-struct='yes' visibility='default' id='type-id-362'>
+ <function-decl name='fnvlist_add_int64' mangled-name='fnvlist_add_int64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_sendrecv.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <class-decl name='recvflags' size-in-bits='416' is-struct='yes' visibility='default' id='type-id-163'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='verbose' type-id='type-id-16' visibility='default'/>
+ <var-decl name='verbose' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='isprefix' type-id='type-id-16' visibility='default'/>
+ <var-decl name='isprefix' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='istail' type-id='type-id-16' visibility='default'/>
+ <var-decl name='istail' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='dryrun' type-id='type-id-16' visibility='default'/>
+ <var-decl name='dryrun' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='force' type-id='type-id-16' visibility='default'/>
+ <var-decl name='force' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='canmountoff' type-id='type-id-16' visibility='default'/>
+ <var-decl name='canmountoff' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='resumable' type-id='type-id-16' visibility='default'/>
+ <var-decl name='resumable' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='byteswap' type-id='type-id-16' visibility='default'/>
+ <var-decl name='byteswap' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='nomount' type-id='type-id-16' visibility='default'/>
+ <var-decl name='nomount' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='holds' type-id='type-id-16' visibility='default'/>
+ <var-decl name='holds' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='skipholds' type-id='type-id-16' visibility='default'/>
+ <var-decl name='skipholds' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='domount' type-id='type-id-16' visibility='default'/>
+ <var-decl name='domount' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='forceunmount' type-id='type-id-16' visibility='default'/>
+ <var-decl name='forceunmount' type-id='type-id-9' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='recvflags_t' type-id='type-id-362' id='type-id-363'/>
- <pointer-type-def type-id='type-id-363' size-in-bits='64' id='type-id-364'/>
- <pointer-type-def type-id='type-id-25' size-in-bits='64' id='type-id-365'/>
+ <typedef-decl name='recvflags_t' type-id='type-id-163' id='type-id-164'/>
+ <pointer-type-def type-id='type-id-164' size-in-bits='64' id='type-id-165'/>
+ <pointer-type-def type-id='type-id-11' size-in-bits='64' id='type-id-166'/>
<function-decl name='zfs_receive' mangled-name='zfs_receive' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_receive'>
- <parameter type-id='type-id-10' name='hdl'/>
+ <parameter type-id='type-id-16' name='hdl'/>
<parameter type-id='type-id-84' name='tosnap'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-364' name='flags'/>
- <parameter type-id='type-id-8' name='infd'/>
- <parameter type-id='type-id-365' name='stream_avl'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='libzfs_set_pipe_max' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='perror' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='fletcher_4_incremental_byteswap' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-244'/>
- <return type-id='type-id-102'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
- </function-decl>
- <function-decl name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-95'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_get_recvd_props' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <return type-id='type-id-104'/>
- </function-decl>
- <function-decl name='lzc_send_space' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-361'/>
- <parameter type-id='type-id-102'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fnvlist_merge' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='type-id-19' name='props'/>
+ <parameter type-id='type-id-165' name='flags'/>
+ <parameter type-id='type-id-2' name='infd'/>
+ <parameter type-id='type-id-166' name='stream_avl'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <class-decl name='dmu_replay_record' size-in-bits='2496' is-struct='yes' visibility='default' id='type-id-366'>
+ <class-decl name='sendflags' size-in-bits='544' is-struct='yes' visibility='default' id='type-id-167'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_type' type-id='type-id-367' visibility='default'/>
+ <var-decl name='verbosity' type-id='type-id-2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='drr_payloadlen' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_u' type-id='type-id-368' visibility='default'/>
- </data-member>
- </class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-367'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='DRR_BEGIN' value='0'/>
- <enumerator name='DRR_OBJECT' value='1'/>
- <enumerator name='DRR_FREEOBJECTS' value='2'/>
- <enumerator name='DRR_WRITE' value='3'/>
- <enumerator name='DRR_FREE' value='4'/>
- <enumerator name='DRR_END' value='5'/>
- <enumerator name='DRR_WRITE_BYREF' value='6'/>
- <enumerator name='DRR_SPILL' value='7'/>
- <enumerator name='DRR_WRITE_EMBEDDED' value='8'/>
- <enumerator name='DRR_OBJECT_RANGE' value='9'/>
- <enumerator name='DRR_REDACT' value='10'/>
- <enumerator name='DRR_NUMTYPES' value='11'/>
- </enum-decl>
- <union-decl name='__anonymous_union__' size-in-bits='2432' is-anonymous='yes' visibility='default' id='type-id-368'>
- <data-member access='private'>
- <var-decl name='drr_begin' type-id='type-id-112' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_end' type-id='type-id-369' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_object' type-id='type-id-370' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_freeobjects' type-id='type-id-371' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_write' type-id='type-id-372' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_free' type-id='type-id-373' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_write_byref' type-id='type-id-374' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_spill' type-id='type-id-375' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_write_embedded' type-id='type-id-376' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_object_range' type-id='type-id-377' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_redact' type-id='type-id-378' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_checksum' type-id='type-id-379' visibility='default'/>
- </data-member>
- </union-decl>
- <class-decl name='drr_end' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-369'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_checksum' type-id='type-id-380' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='zio_cksum_t' type-id='type-id-358' id='type-id-380'/>
- <class-decl name='drr_object' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-370'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-22' visibility='default'/>
+ <var-decl name='replicate' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_type' type-id='type-id-381' visibility='default'/>
+ <var-decl name='skipmissing' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='drr_bonustype' type-id='type-id-381' visibility='default'/>
+ <var-decl name='doall' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_blksz' type-id='type-id-38' visibility='default'/>
+ <var-decl name='fromorigin' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='drr_bonuslen' type-id='type-id-38' visibility='default'/>
+ <var-decl name='pad' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_checksumtype' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='200'>
- <var-decl name='drr_compress' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='208'>
- <var-decl name='drr_dn_slots' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='216'>
- <var-decl name='drr_flags' type-id='type-id-79' visibility='default'/>
+ <var-decl name='props' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='drr_raw_bonuslen' type-id='type-id-38' visibility='default'/>
+ <var-decl name='dryrun' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_indblkshift' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='328'>
- <var-decl name='drr_nlevels' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='336'>
- <var-decl name='drr_nblkptr' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='344'>
- <var-decl name='drr_pad' type-id='type-id-382' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_maxblkid' type-id='type-id-22' visibility='default'/>
- </data-member>
- </class-decl>
- <enum-decl name='dmu_object_type' id='type-id-383'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='DMU_OT_NONE' value='0'/>
- <enumerator name='DMU_OT_OBJECT_DIRECTORY' value='1'/>
- <enumerator name='DMU_OT_OBJECT_ARRAY' value='2'/>
- <enumerator name='DMU_OT_PACKED_NVLIST' value='3'/>
- <enumerator name='DMU_OT_PACKED_NVLIST_SIZE' value='4'/>
- <enumerator name='DMU_OT_BPOBJ' value='5'/>
- <enumerator name='DMU_OT_BPOBJ_HDR' value='6'/>
- <enumerator name='DMU_OT_SPACE_MAP_HEADER' value='7'/>
- <enumerator name='DMU_OT_SPACE_MAP' value='8'/>
- <enumerator name='DMU_OT_INTENT_LOG' value='9'/>
- <enumerator name='DMU_OT_DNODE' value='10'/>
- <enumerator name='DMU_OT_OBJSET' value='11'/>
- <enumerator name='DMU_OT_DSL_DIR' value='12'/>
- <enumerator name='DMU_OT_DSL_DIR_CHILD_MAP' value='13'/>
- <enumerator name='DMU_OT_DSL_DS_SNAP_MAP' value='14'/>
- <enumerator name='DMU_OT_DSL_PROPS' value='15'/>
- <enumerator name='DMU_OT_DSL_DATASET' value='16'/>
- <enumerator name='DMU_OT_ZNODE' value='17'/>
- <enumerator name='DMU_OT_OLDACL' value='18'/>
- <enumerator name='DMU_OT_PLAIN_FILE_CONTENTS' value='19'/>
- <enumerator name='DMU_OT_DIRECTORY_CONTENTS' value='20'/>
- <enumerator name='DMU_OT_MASTER_NODE' value='21'/>
- <enumerator name='DMU_OT_UNLINKED_SET' value='22'/>
- <enumerator name='DMU_OT_ZVOL' value='23'/>
- <enumerator name='DMU_OT_ZVOL_PROP' value='24'/>
- <enumerator name='DMU_OT_PLAIN_OTHER' value='25'/>
- <enumerator name='DMU_OT_UINT64_OTHER' value='26'/>
- <enumerator name='DMU_OT_ZAP_OTHER' value='27'/>
- <enumerator name='DMU_OT_ERROR_LOG' value='28'/>
- <enumerator name='DMU_OT_SPA_HISTORY' value='29'/>
- <enumerator name='DMU_OT_SPA_HISTORY_OFFSETS' value='30'/>
- <enumerator name='DMU_OT_POOL_PROPS' value='31'/>
- <enumerator name='DMU_OT_DSL_PERMS' value='32'/>
- <enumerator name='DMU_OT_ACL' value='33'/>
- <enumerator name='DMU_OT_SYSACL' value='34'/>
- <enumerator name='DMU_OT_FUID' value='35'/>
- <enumerator name='DMU_OT_FUID_SIZE' value='36'/>
- <enumerator name='DMU_OT_NEXT_CLONES' value='37'/>
- <enumerator name='DMU_OT_SCAN_QUEUE' value='38'/>
- <enumerator name='DMU_OT_USERGROUP_USED' value='39'/>
- <enumerator name='DMU_OT_USERGROUP_QUOTA' value='40'/>
- <enumerator name='DMU_OT_USERREFS' value='41'/>
- <enumerator name='DMU_OT_DDT_ZAP' value='42'/>
- <enumerator name='DMU_OT_DDT_STATS' value='43'/>
- <enumerator name='DMU_OT_SA' value='44'/>
- <enumerator name='DMU_OT_SA_MASTER_NODE' value='45'/>
- <enumerator name='DMU_OT_SA_ATTR_REGISTRATION' value='46'/>
- <enumerator name='DMU_OT_SA_ATTR_LAYOUTS' value='47'/>
- <enumerator name='DMU_OT_SCAN_XLATE' value='48'/>
- <enumerator name='DMU_OT_DEDUP' value='49'/>
- <enumerator name='DMU_OT_DEADLIST' value='50'/>
- <enumerator name='DMU_OT_DEADLIST_HDR' value='51'/>
- <enumerator name='DMU_OT_DSL_CLONES' value='52'/>
- <enumerator name='DMU_OT_BPOBJ_SUBOBJ' value='53'/>
- <enumerator name='DMU_OT_NUMTYPES' value='54'/>
- <enumerator name='DMU_OTN_UINT8_DATA' value='128'/>
- <enumerator name='DMU_OTN_UINT8_METADATA' value='192'/>
- <enumerator name='DMU_OTN_UINT16_DATA' value='129'/>
- <enumerator name='DMU_OTN_UINT16_METADATA' value='193'/>
- <enumerator name='DMU_OTN_UINT32_DATA' value='130'/>
- <enumerator name='DMU_OTN_UINT32_METADATA' value='194'/>
- <enumerator name='DMU_OTN_UINT64_DATA' value='131'/>
- <enumerator name='DMU_OTN_UINT64_METADATA' value='195'/>
- <enumerator name='DMU_OTN_ZAP_DATA' value='132'/>
- <enumerator name='DMU_OTN_ZAP_METADATA' value='196'/>
- <enumerator name='DMU_OTN_UINT8_ENC_DATA' value='160'/>
- <enumerator name='DMU_OTN_UINT8_ENC_METADATA' value='224'/>
- <enumerator name='DMU_OTN_UINT16_ENC_DATA' value='161'/>
- <enumerator name='DMU_OTN_UINT16_ENC_METADATA' value='225'/>
- <enumerator name='DMU_OTN_UINT32_ENC_DATA' value='162'/>
- <enumerator name='DMU_OTN_UINT32_ENC_METADATA' value='226'/>
- <enumerator name='DMU_OTN_UINT64_ENC_DATA' value='163'/>
- <enumerator name='DMU_OTN_UINT64_ENC_METADATA' value='227'/>
- <enumerator name='DMU_OTN_ZAP_ENC_DATA' value='164'/>
- <enumerator name='DMU_OTN_ZAP_ENC_METADATA' value='228'/>
- </enum-decl>
- <typedef-decl name='dmu_object_type_t' type-id='type-id-383' id='type-id-381'/>
-
- <array-type-def dimensions='1' type-id='type-id-79' size-in-bits='40' id='type-id-382'>
- <subrange length='5' type-id='type-id-33' id='type-id-384'/>
-
- </array-type-def>
- <class-decl name='drr_freeobjects' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-371'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_firstobj' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_numobjs' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='drr_write' size-in-bits='1088' is-struct='yes' visibility='default' id='type-id-372'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_type' type-id='type-id-381' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='drr_pad' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_offset' type-id='type-id-22' visibility='default'/>
+ <var-decl name='parsable' type-id='type-id-9' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_logical_size' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='progress' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_checksumtype' type-id='type-id-79' visibility='default'/>
+ <var-decl name='largeblock' type-id='type-id-9' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='328'>
- <var-decl name='drr_flags' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='336'>
- <var-decl name='drr_compressiontype' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='344'>
- <var-decl name='drr_pad2' type-id='type-id-382' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='embed_data' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_key' type-id='type-id-385' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='drr_compressed_size' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='drr_salt' type-id='type-id-386' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='drr_iv' type-id='type-id-387' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='drr_mac' type-id='type-id-388' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='ddt_key' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-389'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ddk_cksum' type-id='type-id-380' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='ddk_prop' type-id='type-id-22' visibility='default'/>
+ <var-decl name='compress' type-id='type-id-9' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='ddt_key_t' type-id='type-id-389' id='type-id-385'/>
-
- <array-type-def dimensions='1' type-id='type-id-79' size-in-bits='64' id='type-id-386'>
- <subrange length='8' type-id='type-id-33' id='type-id-390'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-79' size-in-bits='96' id='type-id-387'>
- <subrange length='12' type-id='type-id-33' id='type-id-391'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-79' size-in-bits='128' id='type-id-388'>
- <subrange length='16' type-id='type-id-33' id='type-id-169'/>
-
- </array-type-def>
- <class-decl name='drr_free' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-373'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-22' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='416'>
+ <var-decl name='raw' type-id='type-id-9' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-22' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='backup' type-id='type-id-9' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-22' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='480'>
+ <var-decl name='holds' type-id='type-id-9' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='saved' type-id='type-id-9' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='drr_write_byref' size-in-bits='832' is-struct='yes' visibility='default' id='type-id-374'>
+ <typedef-decl name='sendflags_t' type-id='type-id-167' id='type-id-168'/>
+ <pointer-type-def type-id='type-id-168' size-in-bits='64' id='type-id-169'/>
+ <function-decl name='zfs_send_one' mangled-name='zfs_send_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_one'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='from'/>
+ <parameter type-id='type-id-2' name='fd'/>
+ <parameter type-id='type-id-169' name='flags'/>
+ <parameter type-id='type-id-84' name='redactbook'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <typedef-decl name='snapfilter_cb_t' type-id='type-id-170' id='type-id-171'/>
+ <pointer-type-def type-id='type-id-171' size-in-bits='64' id='type-id-172'/>
+ <function-decl name='zfs_send' mangled-name='zfs_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='fromsnap'/>
+ <parameter type-id='type-id-84' name='tosnap'/>
+ <parameter type-id='type-id-169' name='flags'/>
+ <parameter type-id='type-id-2' name='outfd'/>
+ <parameter type-id='type-id-172' name='filter_func'/>
+ <parameter type-id='type-id-13' name='cb_arg'/>
+ <parameter type-id='type-id-86' name='debugnvp'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_send_saved' mangled-name='zfs_send_saved' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_saved'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-169' name='flags'/>
+ <parameter type-id='type-id-2' name='outfd'/>
+ <parameter type-id='type-id-84' name='resume_token'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_send_resume' mangled-name='zfs_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-169' name='flags'/>
+ <parameter type-id='type-id-2' name='outfd'/>
+ <parameter type-id='type-id-84' name='resume_token'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_send_resume_token_to_nvlist' mangled-name='zfs_send_resume_token_to_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume_token_to_nvlist'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='token'/>
+ <return type-id='type-id-19'/>
+ </function-decl>
+ <function-decl name='zfs_send_progress' mangled-name='zfs_send_progress' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_progress'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-2' name='fd'/>
+ <parameter type-id='type-id-108' name='bytes_written'/>
+ <parameter type-id='type-id-108' name='blocks_visited'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='libzfs_set_pipe_max' mangled-name='libzfs_set_pipe_max' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='perror' mangled-name='perror' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_prop_set' mangled-name='zfs_prop_set' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_boolean' mangled-name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fletcher_4_incremental_byteswap' mangled-name='fletcher_4_incremental_byteswap' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__builtin___strcat_chk' mangled-name='__strcat_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fnvlist_merge' mangled-name='fnvlist_merge' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='create_parents' mangled-name='create_parents' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvpair_value_int32' mangled-name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='time' mangled-name='time' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_nvpair' mangled-name='fnvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fnvlist_remove' mangled-name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_receive_with_cmdprops' mangled-name='lzc_receive_with_cmdprops' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__builtin___sprintf_chk' mangled-name='__sprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__builtin_puts' mangled-name='puts' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_uint64_array' mangled-name='fnvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fletcher_4_incremental_native' mangled-name='fletcher_4_incremental_native' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_send_redacted' mangled-name='lzc_send_redacted' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_hold_nvl' mangled-name='zfs_hold_nvl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_get_pool_handle' mangled-name='zfs_get_pool_handle' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fnvlist_size' mangled-name='fnvlist_size' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='write' mangled-name='write' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_boolean_value' mangled-name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='strndup' mangled-name='strndup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_send_resume_redacted' mangled-name='lzc_send_resume_redacted' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_print' mangled-name='nvlist_print' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_send_space_resume_redacted' mangled-name='lzc_send_space_resume_redacted' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fletcher_4_native_varsize' mangled-name='fletcher_4_native_varsize' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='uncompress' mangled-name='uncompress' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_send_space' mangled-name='lzc_send_space' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='sleep' mangled-name='sleep' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='localtime' mangled-name='localtime' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_get_recvd_props' mangled-name='zfs_get_recvd_props' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_rename' mangled-name='lzc_rename' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='type-id-170'>
+ <parameter type-id='type-id-76'/>
+ <parameter type-id='type-id-13'/>
+ <return type-id='type-id-9'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_status.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-173'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='ZPOOL_STATUS_CORRUPT_CACHE' value='0'/>
+ <enumerator name='ZPOOL_STATUS_MISSING_DEV_R' value='1'/>
+ <enumerator name='ZPOOL_STATUS_MISSING_DEV_NR' value='2'/>
+ <enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_R' value='3'/>
+ <enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_NR' value='4'/>
+ <enumerator name='ZPOOL_STATUS_BAD_GUID_SUM' value='5'/>
+ <enumerator name='ZPOOL_STATUS_CORRUPT_POOL' value='6'/>
+ <enumerator name='ZPOOL_STATUS_CORRUPT_DATA' value='7'/>
+ <enumerator name='ZPOOL_STATUS_FAILING_DEV' value='8'/>
+ <enumerator name='ZPOOL_STATUS_VERSION_NEWER' value='9'/>
+ <enumerator name='ZPOOL_STATUS_HOSTID_MISMATCH' value='10'/>
+ <enumerator name='ZPOOL_STATUS_HOSTID_ACTIVE' value='11'/>
+ <enumerator name='ZPOOL_STATUS_HOSTID_REQUIRED' value='12'/>
+ <enumerator name='ZPOOL_STATUS_IO_FAILURE_WAIT' value='13'/>
+ <enumerator name='ZPOOL_STATUS_IO_FAILURE_CONTINUE' value='14'/>
+ <enumerator name='ZPOOL_STATUS_IO_FAILURE_MMP' value='15'/>
+ <enumerator name='ZPOOL_STATUS_BAD_LOG' value='16'/>
+ <enumerator name='ZPOOL_STATUS_ERRATA' value='17'/>
+ <enumerator name='ZPOOL_STATUS_UNSUP_FEAT_READ' value='18'/>
+ <enumerator name='ZPOOL_STATUS_UNSUP_FEAT_WRITE' value='19'/>
+ <enumerator name='ZPOOL_STATUS_FAULTED_DEV_R' value='20'/>
+ <enumerator name='ZPOOL_STATUS_FAULTED_DEV_NR' value='21'/>
+ <enumerator name='ZPOOL_STATUS_VERSION_OLDER' value='22'/>
+ <enumerator name='ZPOOL_STATUS_FEAT_DISABLED' value='23'/>
+ <enumerator name='ZPOOL_STATUS_RESILVERING' value='24'/>
+ <enumerator name='ZPOOL_STATUS_OFFLINE_DEV' value='25'/>
+ <enumerator name='ZPOOL_STATUS_REMOVED_DEV' value='26'/>
+ <enumerator name='ZPOOL_STATUS_REBUILDING' value='27'/>
+ <enumerator name='ZPOOL_STATUS_REBUILD_SCRUB' value='28'/>
+ <enumerator name='ZPOOL_STATUS_NON_NATIVE_ASHIFT' value='29'/>
+ <enumerator name='ZPOOL_STATUS_COMPATIBILITY_ERR' value='30'/>
+ <enumerator name='ZPOOL_STATUS_INCOMPATIBLE_FEAT' value='31'/>
+ <enumerator name='ZPOOL_STATUS_OK' value='32'/>
+ </enum-decl>
+ <typedef-decl name='zpool_status_t' type-id='type-id-173' id='type-id-174'/>
+ <enum-decl name='zpool_errata' id='type-id-175'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='ZPOOL_ERRATA_NONE' value='0'/>
+ <enumerator name='ZPOOL_ERRATA_ZOL_2094_SCRUB' value='1'/>
+ <enumerator name='ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY' value='2'/>
+ <enumerator name='ZPOOL_ERRATA_ZOL_6845_ENCRYPTION' value='3'/>
+ <enumerator name='ZPOOL_ERRATA_ZOL_8308_ENCRYPTION' value='4'/>
+ </enum-decl>
+ <typedef-decl name='zpool_errata_t' type-id='type-id-175' id='type-id-176'/>
+ <pointer-type-def type-id='type-id-176' size-in-bits='64' id='type-id-177'/>
+ <function-decl name='zpool_import_status' mangled-name='zpool_import_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_status'>
+ <parameter type-id='type-id-19' name='config'/>
+ <parameter type-id='type-id-117' name='msgid'/>
+ <parameter type-id='type-id-177' name='errata'/>
+ <return type-id='type-id-174'/>
+ </function-decl>
+ <function-decl name='zpool_get_status' mangled-name='zpool_get_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_status'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-117' name='msgid'/>
+ <parameter type-id='type-id-177' name='errata'/>
+ <return type-id='type-id-174'/>
+ </function-decl>
+ <function-decl name='zpool_load_compat' mangled-name='zpool_load_compat' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_util.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='printf_color' mangled-name='printf_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='printf_color'>
+ <parameter type-id='type-id-14' name='color'/>
+ <parameter type-id='type-id-14' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_version_print' mangled-name='zfs_version_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_print'>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_version_userland' mangled-name='zfs_version_userland' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_userland'>
+ <parameter type-id='type-id-14' name='version'/>
+ <parameter type-id='type-id-2' name='len'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-178' size-in-bits='64' id='type-id-179'/>
+ <typedef-decl name='zprop_func' type-id='type-id-179' id='type-id-180'/>
+ <function-decl name='zprop_iter' mangled-name='zprop_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter'>
+ <parameter type-id='type-id-180' name='func'/>
+ <parameter type-id='type-id-13' name='cb'/>
+ <parameter type-id='type-id-9' name='show_all'/>
+ <parameter type-id='type-id-9' name='ordered'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zprop_free_list' mangled-name='zprop_free_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_free_list'>
+ <parameter type-id='type-id-102' name='pl'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zprop_get_list' mangled-name='zprop_get_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_get_list'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-14' name='props'/>
+ <parameter type-id='type-id-103' name='listp'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <class-decl name='zprop_get_cbdata' size-in-bits='640' is-struct='yes' visibility='default' id='type-id-181'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-22' visibility='default'/>
+ <var-decl name='cb_sources' type-id='type-id-2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-22' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='cb_columns' type-id='type-id-182' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_refguid' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_refobject' type-id='type-id-22' visibility='default'/>
+ <var-decl name='cb_colwidths' type-id='type-id-183' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_refoffset' type-id='type-id-22' visibility='default'/>
+ <var-decl name='cb_scripted' type-id='type-id-9' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='drr_checksumtype' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='456'>
- <var-decl name='drr_flags' type-id='type-id-79' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='416'>
+ <var-decl name='cb_literal' type-id='type-id-9' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='464'>
- <var-decl name='drr_pad2' type-id='type-id-392' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='cb_first' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='drr_key' type-id='type-id-385' visibility='default'/>
+ <var-decl name='cb_proplist' type-id='type-id-102' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='cb_type' type-id='type-id-66' visibility='default'/>
</data-member>
</class-decl>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-184'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='GET_COL_NONE' value='0'/>
+ <enumerator name='GET_COL_NAME' value='1'/>
+ <enumerator name='GET_COL_PROPERTY' value='2'/>
+ <enumerator name='GET_COL_VALUE' value='3'/>
+ <enumerator name='GET_COL_RECVD' value='4'/>
+ <enumerator name='GET_COL_SOURCE' value='5'/>
+ </enum-decl>
+ <typedef-decl name='zfs_get_column_t' type-id='type-id-184' id='type-id-185'/>
- <array-type-def dimensions='1' type-id='type-id-79' size-in-bits='48' id='type-id-392'>
- <subrange length='6' type-id='type-id-33' id='type-id-393'/>
+ <array-type-def dimensions='1' type-id='type-id-185' size-in-bits='160' alignment-in-bits='32' id='type-id-182'>
+ <subrange length='5' type-id='type-id-24' id='type-id-186'/>
</array-type-def>
- <class-decl name='drr_spill' size-in-bits='640' is-struct='yes' visibility='default' id='type-id-375'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_length' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_flags' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='200'>
- <var-decl name='drr_compressiontype' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='208'>
- <var-decl name='drr_pad' type-id='type-id-392' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_compressed_size' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_salt' type-id='type-id-386' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_iv' type-id='type-id-387' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='drr_mac' type-id='type-id-388' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='608'>
- <var-decl name='drr_type' type-id='type-id-381' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='drr_write_embedded' size-in-bits='384' is-struct='yes' visibility='default' id='type-id-376'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_compression' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='264'>
- <var-decl name='drr_etype' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='272'>
- <var-decl name='drr_pad' type-id='type-id-392' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_lsize' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='drr_psize' type-id='type-id-38' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='drr_object_range' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-377'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_firstobj' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_numslots' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_salt' type-id='type-id-386' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_iv' type-id='type-id-387' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='drr_mac' type-id='type-id-388' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='drr_flags' type-id='type-id-79' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='488'>
- <var-decl name='drr_pad' type-id='type-id-114' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='drr_redact' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-378'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-22' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='drr_checksum' size-in-bits='2432' is-struct='yes' visibility='default' id='type-id-379'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_pad' type-id='type-id-394' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2176'>
- <var-decl name='drr_checksum' type-id='type-id-380' visibility='default'/>
- </data-member>
- </class-decl>
- <array-type-def dimensions='1' type-id='type-id-22' size-in-bits='2176' id='type-id-394'>
- <subrange length='34' type-id='type-id-33' id='type-id-395'/>
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='192' id='type-id-183'>
+ <subrange length='6' type-id='type-id-24' id='type-id-187'/>
</array-type-def>
- <qualified-type-def type-id='type-id-366' const='yes' id='type-id-396'/>
- <pointer-type-def type-id='type-id-396' size-in-bits='64' id='type-id-397'/>
- <function-decl name='lzc_receive_with_cmdprops' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-5'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-397'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-107'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <parameter type-id='type-id-223'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='create_parents' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fnvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-133'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-104'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
+ <typedef-decl name='zprop_get_cbdata_t' type-id='type-id-181' id='type-id-188'/>
+ <pointer-type-def type-id='type-id-188' size-in-bits='64' id='type-id-189'/>
+ <function-decl name='zprop_print_one_property' mangled-name='zprop_print_one_property' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_print_one_property'>
+ <parameter type-id='type-id-84' name='name'/>
+ <parameter type-id='type-id-189' name='cbp'/>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-84' name='value'/>
+ <parameter type-id='type-id-112' name='sourcetype'/>
+ <parameter type-id='type-id-84' name='source'/>
+ <parameter type-id='type-id-84' name='recvd_value'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_set' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-82'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_path_to_zhandle' mangled-name='zfs_path_to_zhandle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_path_to_zhandle'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-66' name='argtype'/>
+ <return type-id='type-id-76'/>
</function-decl>
- <function-decl name='lzc_rename' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_get_pool_handle' mangled-name='zfs_get_pool_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_handle'>
+ <parameter type-id='type-id-107' name='zhp'/>
+ <return type-id='type-id-4'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-355'>
- <parameter type-id='type-id-135'/>
- <parameter type-id='type-id-7'/>
+ <function-decl name='zfs_get_handle' mangled-name='zfs_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_handle'>
+ <parameter type-id='type-id-76' name='zhp'/>
<return type-id='type-id-16'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_status.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <typedef-decl name='zpool_status_t' type-id='type-id-325' id='type-id-398'/>
- <typedef-decl name='zpool_errata_t' type-id='type-id-326' id='type-id-399'/>
- <pointer-type-def type-id='type-id-399' size-in-bits='64' id='type-id-400'/>
- <function-decl name='zpool_get_status' mangled-name='zpool_get_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_status'>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-88' name='msgid'/>
- <parameter type-id='type-id-400' name='errata'/>
- <return type-id='type-id-398'/>
</function-decl>
- <function-decl name='zpool_load_compat' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-294'/>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-318'/>
+ <function-decl name='zpool_get_handle' mangled-name='zpool_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_handle'>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <return type-id='type-id-16'/>
</function-decl>
- <function-decl name='zpool_import_status' mangled-name='zpool_import_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_status'>
- <parameter type-id='type-id-15' name='config'/>
- <parameter type-id='type-id-88' name='msgid'/>
- <parameter type-id='type-id-400' name='errata'/>
- <return type-id='type-id-398'/>
+ <function-decl name='libzfs_fini' mangled-name='libzfs_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_fini'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <return type-id='type-id-1'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_util.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zprop_get_list' mangled-name='zprop_get_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_get_list'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-17' name='props'/>
- <parameter type-id='type-id-259' name='listp'/>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-8'/>
+ <function-decl name='libzfs_init' mangled-name='libzfs_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_init'>
+ <return type-id='type-id-16'/>
</function-decl>
- <function-decl name='libzfs_errno' mangled-name='libzfs_errno' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_errno'>
- <parameter type-id='type-id-10' name='hdl'/>
- <return type-id='type-id-8'/>
+ <function-decl name='libzfs_envvar_is_set' mangled-name='libzfs_envvar_is_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_envvar_is_set'>
+ <parameter type-id='type-id-14' name='envvar'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='libzfs_free_str_array' mangled-name='libzfs_free_str_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_free_str_array'>
+ <parameter type-id='type-id-117' name='strs'/>
+ <parameter type-id='type-id-2' name='count'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-117' size-in-bits='64' id='type-id-190'/>
+ <function-decl name='libzfs_run_process_get_stdout_nopath' mangled-name='libzfs_run_process_get_stdout_nopath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout_nopath'>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-117' name='argv'/>
+ <parameter type-id='type-id-117' name='env'/>
+ <parameter type-id='type-id-190' name='lines'/>
+ <parameter type-id='type-id-114' name='lines_cnt'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='libzfs_run_process_get_stdout' mangled-name='libzfs_run_process_get_stdout' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout'>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-117' name='argv'/>
+ <parameter type-id='type-id-117' name='env'/>
+ <parameter type-id='type-id-190' name='lines'/>
+ <parameter type-id='type-id-114' name='lines_cnt'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='libzfs_run_process' mangled-name='libzfs_run_process' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process'>
+ <parameter type-id='type-id-84' name='path'/>
+ <parameter type-id='type-id-117' name='argv'/>
+ <parameter type-id='type-id-2' name='flags'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='libzfs_print_on_error' mangled-name='libzfs_print_on_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_print_on_error'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-9' name='enable'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_standard_error' mangled-name='zfs_standard_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_standard_error'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-2' name='error'/>
+ <parameter type-id='type-id-84' name='msg'/>
+ <return type-id='type-id-2'/>
</function-decl>
<function-decl name='libzfs_error_action' mangled-name='libzfs_error_action' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_action'>
- <parameter type-id='type-id-10' name='hdl'/>
+ <parameter type-id='type-id-16' name='hdl'/>
<return type-id='type-id-84'/>
</function-decl>
+ <function-decl name='libzfs_errno' mangled-name='libzfs_errno' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_errno'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
<function-decl name='libzfs_error_description' mangled-name='libzfs_error_description' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_description'>
- <parameter type-id='type-id-10' name='hdl'/>
+ <parameter type-id='type-id-16' name='hdl'/>
<return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_standard_error' mangled-name='zfs_standard_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_standard_error'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-8' name='error'/>
- <parameter type-id='type-id-84' name='msg'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_nicestrtonum' mangled-name='zfs_nicestrtonum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicestrtonum'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-84' name='value'/>
+ <parameter type-id='type-id-108' name='num'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-401'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='gp_offset' type-id='type-id-5' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='fp_offset' type-id='type-id-5' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='overflow_arg_area' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='reg_save_area' type-id='type-id-7' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-401' size-in-bits='64' id='type-id-402'/>
- <function-decl name='vasprintf' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-88'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-402'/>
- <return type-id='type-id-8'/>
+ <function-decl name='color_start' mangled-name='color_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_start'>
+ <parameter type-id='type-id-14' name='color'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_print_on_error' mangled-name='libzfs_print_on_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_print_on_error'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-16' name='enable'/>
- <return type-id='type-id-6'/>
+ <function-decl name='color_end' mangled-name='color_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_end'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_run_process' mangled-name='libzfs_run_process' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-88' name='argv'/>
- <parameter type-id='type-id-8' name='flags'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fork' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='waitpid' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-223'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='dup2' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <qualified-type-def type-id='type-id-17' const='yes' id='type-id-403'/>
- <pointer-type-def type-id='type-id-403' size-in-bits='64' id='type-id-404'/>
- <function-decl name='execv' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-404'/>
- <return type-id='type-id-8'/>
+ <function-decl name='__vfprintf_chk' mangled-name='__vfprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='execvp' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-404'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_version_kernel' mangled-name='zfs_version_kernel' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='execve' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-404'/>
- <parameter type-id='type-id-404'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zprop_iter_common' mangled-name='zprop_iter_common' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='execvpe' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-404'/>
- <parameter type-id='type-id-404'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zprop_width' mangled-name='zprop_width' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-88' size-in-bits='64' id='type-id-405'/>
- <function-decl name='libzfs_run_process_get_stdout' mangled-name='libzfs_run_process_get_stdout' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-88' name='argv'/>
- <parameter type-id='type-id-88' name='env'/>
- <parameter type-id='type-id-405' name='lines'/>
- <parameter type-id='type-id-223' name='lines_cnt'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zprop_name_to_prop' mangled-name='zprop_name_to_prop' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_run_process_get_stdout_nopath' mangled-name='libzfs_run_process_get_stdout_nopath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout_nopath'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-88' name='argv'/>
- <parameter type-id='type-id-88' name='env'/>
- <parameter type-id='type-id-405' name='lines'/>
- <parameter type-id='type-id-223' name='lines_cnt'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zprop_valid_for_type' mangled-name='zprop_valid_for_type' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_free_str_array' mangled-name='libzfs_free_str_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_free_str_array'>
- <parameter type-id='type-id-88' name='strs'/>
- <parameter type-id='type-id-8' name='count'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zpool_prop_unsupported' mangled-name='zpool_prop_unsupported' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_envvar_is_set' mangled-name='libzfs_envvar_is_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_envvar_is_set'>
- <parameter type-id='type-id-17' name='envvar'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zprop_string_to_index' mangled-name='zprop_string_to_index' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='strnlen' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-35'/>
+ <function-decl name='zprop_values' mangled-name='zprop_values' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_init' mangled-name='libzfs_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_init'>
- <return type-id='type-id-10'/>
+ <function-decl name='__ctype_toupper_loc' mangled-name='__ctype_toupper_loc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_load_module' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-8'/>
+ <function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-69' size-in-bits='64' id='type-id-406'/>
- <function-decl name='regcomp' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-406'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_free_handles' mangled-name='zpool_free_handles' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_core_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-8'/>
+ <function-decl name='namespace_clear' mangled-name='namespace_clear' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='libzfs_mnttab_fini' mangled-name='libzfs_mnttab_fini' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_prop_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='libzfs_core_fini' mangled-name='libzfs_core_fini' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_feature_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='regfree' mangled-name='regfree' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_mnttab_init' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <return type-id='type-id-6'/>
+ <function-decl name='fletcher_4_fini' mangled-name='fletcher_4_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_fini'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fletcher_4_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='zpool_prop_get_table' mangled-name='zpool_prop_get_table' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_nicestrtonum' mangled-name='zfs_nicestrtonum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicestrtonum'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='value'/>
- <parameter type-id='type-id-248' name='num'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_get_table' mangled-name='zfs_prop_get_table' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='704' is-struct='yes' is-anonymous='yes' naming-typedef-id='type-id-407' visibility='default' id='type-id-408'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pd_name' type-id='type-id-84' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pd_propnum' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='pd_proptype' type-id='type-id-409' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='pd_strdefault' type-id='type-id-84' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='pd_numdefault' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='pd_attr' type-id='type-id-410' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='pd_types' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='pd_values' type-id='type-id-84' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='pd_colname' type-id='type-id-84' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='pd_rightalign' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='pd_visible' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='pd_zfs_mod_supported' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='pd_table' type-id='type-id-411' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='pd_table_size' type-id='type-id-28' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='zprop_type_t' type-id='type-id-234' id='type-id-409'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-412'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='PROP_DEFAULT' value='0'/>
- <enumerator name='PROP_READONLY' value='1'/>
- <enumerator name='PROP_INHERIT' value='2'/>
- <enumerator name='PROP_ONETIME' value='3'/>
- <enumerator name='PROP_ONETIME_DEFAULT' value='4'/>
- </enum-decl>
- <typedef-decl name='zprop_attr_t' type-id='type-id-412' id='type-id-410'/>
- <class-decl name='zfs_index' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-413'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pi_name' type-id='type-id-84' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pi_value' type-id='type-id-22' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='zprop_index_t' type-id='type-id-413' id='type-id-414'/>
- <qualified-type-def type-id='type-id-414' const='yes' id='type-id-415'/>
- <pointer-type-def type-id='type-id-415' size-in-bits='64' id='type-id-411'/>
- <pointer-type-def type-id='type-id-408' size-in-bits='64' id='type-id-416'/>
- <function-decl name='zpool_prop_get_table' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-416'/>
+ <function-decl name='libzfs_load_module' mangled-name='libzfs_load_module' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='regcomp' mangled-name='regcomp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='libzfs_core_init' mangled-name='libzfs_core_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_prop_init' mangled-name='zfs_prop_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zpool_prop_init' mangled-name='zpool_prop_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zpool_feature_init' mangled-name='zpool_feature_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='libzfs_mnttab_init' mangled-name='libzfs_mnttab_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fletcher_4_init' mangled-name='fletcher_4_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='strnlen' mangled-name='strnlen' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='realloc' mangled-name='realloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='waitpid' mangled-name='waitpid' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fork' mangled-name='fork' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='dup2' mangled-name='dup2' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='execve' mangled-name='execve' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='_exit' mangled-name='_exit' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='execvpe' mangled-name='execvpe' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='execv' mangled-name='execv' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='execvp' mangled-name='execvp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__vasprintf_chk' mangled-name='__vasprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='__builtin___vsnprintf_chk' mangled-name='__vsnprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='exit' mangled-name='exit' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='strtod' mangled-name='strtod' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='pow' mangled-name='pow' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='type-id-178'>
+ <parameter type-id='type-id-2'/>
+ <parameter type-id='type-id-13'/>
+ <return type-id='type-id-2'/>
+ </function-type>
+ </abi-instr>
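(Editorial note on the ABI listing just closed: each <function-decl> carrying an elf-symbol-id is an exported libzfs symbol, with its <parameter>/<return> elements pointing at type-id definitions elsewhere in the file; entries without parameters (strnlen, realloc, fork, pow, ...) appear to be recorded only as referenced symbols, not full prototypes. As a rough illustration of the process-helper family declared above, here is a minimal sketch assuming the public prototypes from libzfs.h, i.e. libzfs_run_process_get_stdout() allocating the captured stdout lines and libzfs_free_str_array() releasing them; the command run is only an example.)

#include <stdio.h>
#include <libzfs.h>

/*
 * Sketch only: run "zpool --version" via the libzfs process helpers,
 * print the captured stdout lines, then free the array the library
 * allocated.  Error handling is deliberately minimal.
 */
int
main(void)
{
	char *argv[] = { "zpool", "--version", NULL };
	char **lines = NULL;
	int lines_cnt = 0;

	/* NULL env is assumed here to mean "inherit the caller's environment". */
	if (libzfs_run_process_get_stdout("zpool", argv, NULL,
	    &lines, &lines_cnt) != 0)
		return (1);

	for (int i = 0; i < lines_cnt; i++)
		(void) printf("%s\n", lines[i]);

	libzfs_free_str_array(lines, lines_cnt);
	return (0);
}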
+ <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_mount_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='zfs_mount_delegation_check' mangled-name='zfs_mount_delegation_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_delegation_check'>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_prop_get_table' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-416'/>
+ <function-decl name='zfs_adjust_mount_options' mangled-name='zfs_adjust_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_adjust_mount_options'>
+ <parameter type-id='type-id-76' name='zhp'/>
+ <parameter type-id='type-id-84' name='mntpoint'/>
+ <parameter type-id='type-id-14' name='mntopts'/>
+ <parameter type-id='type-id-14' name='mtabopt'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_fini' mangled-name='libzfs_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_fini'>
- <parameter type-id='type-id-10' name='hdl'/>
- <return type-id='type-id-6'/>
+ <pointer-type-def type-id='type-id-24' size-in-bits='64' id='type-id-191'/>
+ <function-decl name='zfs_parse_mount_options' mangled-name='zfs_parse_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parse_mount_options'>
+ <parameter type-id='type-id-14' name='mntopts'/>
+ <parameter type-id='type-id-191' name='mntflags'/>
+ <parameter type-id='type-id-191' name='zfsflags'/>
+ <parameter type-id='type-id-2' name='sloppy'/>
+ <parameter type-id='type-id-14' name='badopt'/>
+ <parameter type-id='type-id-14' name='mtabopt'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zpool_free_handles' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <return type-id='type-id-6'/>
+ <function-decl name='geteuid' mangled-name='geteuid' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='namespace_clear' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <return type-id='type-id-6'/>
+ <function-decl name='umount2' mangled-name='umount2' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_mnttab_fini' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-91'/>
- <return type-id='type-id-6'/>
+ <function-decl name='libzfs_envvar_is_set' mangled-name='libzfs_envvar_is_set' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='libzfs_core_fini' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='libzfs_run_process' mangled-name='libzfs_run_process' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='regfree' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-406'/>
- <return type-id='type-id-6'/>
+ <function-decl name='mount' mangled-name='mount' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fletcher_4_fini' mangled-name='fletcher_4_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_fini'>
- <return type-id='type-id-6'/>
+ </abi-instr>
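(The abi-instr just closed exports the Linux mount-option helpers with full prototypes. A minimal calling sketch, assuming the prototype int zfs_parse_mount_options(char *mntopts, unsigned long *mntflags, unsigned long *zfsflags, int sloppy, char *badopt, char *mtabopt); buffer sizes below are illustrative only, and parse_opts_example is a hypothetical caller.)

#include <sys/mount.h>
#include <libzfs.h>

/*
 * Sketch: translate a comma-separated option string into generic MS_*
 * mount flags plus ZFS-specific flags, the way a mount.zfs-style caller
 * would before invoking mount(2).
 */
static int
parse_opts_example(void)
{
	unsigned long mntflags = 0, zfsflags = 0;
	char badopt[256] = "";
	char mtabopt[256] = "";
	char opts[] = "ro,noatime,zfsutil";

	if (zfs_parse_mount_options(opts, &mntflags, &zfsflags,
	    0 /* not sloppy: fail on unknown options */,
	    badopt, mtabopt) != 0) {
		/* badopt now names the offending option */
		return (-1);
	}

	/* "ro" should have been folded into the generic flag word. */
	return ((mntflags & MS_RDONLY) ? 0 : -1);
}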
+ <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_pool_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='zpool_label_disk' mangled-name='zpool_label_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk'>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-4' name='zhp'/>
+ <parameter type-id='type-id-84' name='name'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='dlclose' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-decl name='rand' mangled-name='rand' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zpool_get_handle' mangled-name='zpool_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_handle'>
- <parameter type-id='type-id-11' name='zhp'/>
- <return type-id='type-id-10'/>
+ <function-decl name='efi_alloc_and_read' mangled-name='efi_alloc_and_read' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_handle' mangled-name='zfs_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_handle'>
- <parameter type-id='type-id-135' name='zhp'/>
- <return type-id='type-id-10'/>
+ <function-decl name='efi_free' mangled-name='efi_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_get_pool_handle' mangled-name='zfs_get_pool_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_handle'>
- <parameter type-id='type-id-250' name='zhp'/>
- <return type-id='type-id-11'/>
+ <function-decl name='efi_alloc_and_init' mangled-name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_path_to_zhandle' mangled-name='zfs_path_to_zhandle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_path_to_zhandle'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-13' name='argtype'/>
- <return type-id='type-id-135'/>
+ <function-decl name='efi_write' mangled-name='efi_write' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-417'>
+ <function-decl name='fsync' mangled-name='fsync' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='efi_rescan' mangled-name='efi_rescan' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_append_partition' mangled-name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zpool_label_disk_wait' mangled-name='zpool_label_disk_wait' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='efi_use_whole_disk' mangled-name='efi_use_whole_disk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ </abi-instr>
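(In the pool-OS abi-instr above only zpool_label_disk() is exported with a prototype; the efi_* and libc entries appear to be recorded merely as referenced symbols. A usage sketch, assuming the public prototype int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *, const char *); "sdb" and label_example are placeholders, not anything from the patch.)

#include <stdio.h>
#include <libzfs.h>

/*
 * Sketch: write a ZFS-style EFI label onto a whole disk before adding it
 * to a pool.  "sdb" is a placeholder short device name (no /dev/ prefix).
 */
static int
label_example(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	if (zpool_label_disk(hdl, zhp, "sdb") != 0) {
		(void) fprintf(stderr, "%s\n",
		    libzfs_error_description(hdl));
		return (-1);
	}
	return (0);
}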
+ <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_sendrecv_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='fscanf' mangled-name='fscanf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='fcntl' mangled-name='fcntl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_util_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='zfs_version_kernel' mangled-name='zfs_version_kernel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_kernel'>
+ <parameter type-id='type-id-14' name='version'/>
+ <parameter type-id='type-id-2' name='len'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='libzfs_error_init' mangled-name='libzfs_error_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_init'>
+ <parameter type-id='type-id-2' name='error'/>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='type-id-192'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-17' visibility='default'/>
+ <var-decl name='zc_name' type-id='type-id-193' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-17' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32768'>
+ <var-decl name='zc_nvlist_src' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-17' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32832'>
+ <var-decl name='zc_nvlist_src_size' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-17' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32896'>
+ <var-decl name='zc_nvlist_dst' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='mnt_major' type-id='type-id-140' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32960'>
+ <var-decl name='zc_nvlist_dst_size' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='mnt_minor' type-id='type-id-140' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33024'>
+ <var-decl name='zc_nvlist_dst_filled' type-id='type-id-9' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-417' size-in-bits='64' id='type-id-418'/>
- <class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='type-id-419'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='st_dev' type-id='type-id-420' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33056'>
+ <var-decl name='zc_pad2' type-id='type-id-2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='st_ino' type-id='type-id-307' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33088'>
+ <var-decl name='zc_history' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='st_nlink' type-id='type-id-421' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33152'>
+ <var-decl name='zc_value' type-id='type-id-194' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='st_mode' type-id='type-id-422' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='98688'>
+ <var-decl name='zc_string' type-id='type-id-17' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='st_uid' type-id='type-id-191' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='100736'>
+ <var-decl name='zc_guid' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='st_gid' type-id='type-id-225' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='100800'>
+ <var-decl name='zc_nvlist_conf' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='__pad0' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='100864'>
+ <var-decl name='zc_nvlist_conf_size' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='st_rdev' type-id='type-id-420' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='100928'>
+ <var-decl name='zc_cookie' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='st_size' type-id='type-id-151' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='100992'>
+ <var-decl name='zc_objset_type' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='st_blksize' type-id='type-id-423' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='101056'>
+ <var-decl name='zc_perm_action' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='st_blocks' type-id='type-id-424' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='101120'>
+ <var-decl name='zc_history_len' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='st_atim' type-id='type-id-425' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='101184'>
+ <var-decl name='zc_history_offset' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='st_mtim' type-id='type-id-425' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='101248'>
+ <var-decl name='zc_obj' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='st_ctim' type-id='type-id-425' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='101312'>
+ <var-decl name='zc_iflags' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='__glibc_reserved' type-id='type-id-426' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='101376'>
+ <var-decl name='zc_share' type-id='type-id-195' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='__dev_t' type-id='type-id-35' id='type-id-420'/>
- <typedef-decl name='__nlink_t' type-id='type-id-35' id='type-id-421'/>
- <typedef-decl name='__mode_t' type-id='type-id-5' id='type-id-422'/>
- <typedef-decl name='__blksize_t' type-id='type-id-54' id='type-id-423'/>
- <typedef-decl name='__blkcnt64_t' type-id='type-id-54' id='type-id-424'/>
- <class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-425'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='type-id-427' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='101632'>
+ <var-decl name='zc_objset_stats' type-id='type-id-67' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='type-id-428' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='103936'>
+ <var-decl name='zc_begin_record' type-id='type-id-196' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='__time_t' type-id='type-id-54' id='type-id-427'/>
- <typedef-decl name='__syscall_slong_t' type-id='type-id-54' id='type-id-428'/>
-
- <array-type-def dimensions='1' type-id='type-id-428' size-in-bits='192' id='type-id-426'>
- <subrange length='3' type-id='type-id-33' id='type-id-100'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-419' size-in-bits='64' id='type-id-429'/>
- <function-decl name='getextmntent' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-418'/>
- <parameter type-id='type-id-429'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <class-decl name='zprop_get_cbdata' size-in-bits='640' is-struct='yes' visibility='default' id='type-id-430'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='cb_sources' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='106368'>
+ <var-decl name='zc_inject_record' type-id='type-id-197' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='cb_columns' type-id='type-id-431' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109184'>
+ <var-decl name='zc_defer_destroy' type-id='type-id-28' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='cb_colwidths' type-id='type-id-432' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109216'>
+ <var-decl name='zc_flags' type-id='type-id-28' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='cb_scripted' type-id='type-id-16' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109248'>
+ <var-decl name='zc_action_handle' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='416'>
- <var-decl name='cb_literal' type-id='type-id-16' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109312'>
+ <var-decl name='zc_cleanup_fd' type-id='type-id-2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='cb_first' type-id='type-id-16' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109344'>
+ <var-decl name='zc_simple' type-id='type-id-72' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='cb_proplist' type-id='type-id-258' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109352'>
+ <var-decl name='zc_pad' type-id='type-id-198' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='cb_type' type-id='type-id-13' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109376'>
+ <var-decl name='zc_sendobj' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109440'>
+ <var-decl name='zc_fromobj' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109504'>
+ <var-decl name='zc_createtxg' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109568'>
+ <var-decl name='zc_stat' type-id='type-id-199' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109888'>
+ <var-decl name='zc_zoneid' type-id='type-id-7' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-433'>
- <underlying-type type-id='type-id-49'/>
- <enumerator name='GET_COL_NONE' value='0'/>
- <enumerator name='GET_COL_NAME' value='1'/>
- <enumerator name='GET_COL_PROPERTY' value='2'/>
- <enumerator name='GET_COL_VALUE' value='3'/>
- <enumerator name='GET_COL_RECVD' value='4'/>
- <enumerator name='GET_COL_SOURCE' value='5'/>
- </enum-decl>
- <typedef-decl name='zfs_get_column_t' type-id='type-id-433' id='type-id-434'/>
- <array-type-def dimensions='1' type-id='type-id-434' size-in-bits='160' alignment-in-bits='32' id='type-id-431'>
- <subrange length='5' type-id='type-id-33' id='type-id-384'/>
+ <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='32768' id='type-id-193'>
+ <subrange length='4096' type-id='type-id-24' id='type-id-200'/>
</array-type-def>
- <array-type-def dimensions='1' type-id='type-id-8' size-in-bits='192' id='type-id-432'>
- <subrange length='6' type-id='type-id-33' id='type-id-393'/>
+ <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='65536' id='type-id-194'>
+ <subrange length='8192' type-id='type-id-24' id='type-id-201'/>
</array-type-def>
- <typedef-decl name='zprop_get_cbdata_t' type-id='type-id-430' id='type-id-435'/>
- <pointer-type-def type-id='type-id-435' size-in-bits='64' id='type-id-436'/>
- <function-decl name='zprop_print_one_property' mangled-name='zprop_print_one_property' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_print_one_property'>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-436' name='cbp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-84' name='value'/>
- <parameter type-id='type-id-232' name='sourcetype'/>
- <parameter type-id='type-id-84' name='source'/>
- <parameter type-id='type-id-84' name='recvd_value'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='zprop_string_to_index' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-102'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zprop_values' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zprop_name_to_prop' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zprop_valid_for_type' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-76'/>
- <parameter type-id='type-id-50'/>
- <return type-id='type-id-50'/>
- </function-decl>
- <function-decl name='zprop_width' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-294'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-35'/>
- </function-decl>
- <function-decl name='zpool_prop_unsupported' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
- </function-decl>
- <function-decl name='zprop_free_list' mangled-name='zprop_free_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_free_list'>
- <parameter type-id='type-id-258' name='pl'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <pointer-type-def type-id='type-id-437' size-in-bits='64' id='type-id-438'/>
- <function-decl name='zprop_iter_common' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-438'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <typedef-decl name='zprop_func' type-id='type-id-438' id='type-id-439'/>
- <function-decl name='zprop_iter' mangled-name='zprop_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter'>
- <parameter type-id='type-id-439' name='func'/>
- <parameter type-id='type-id-7' name='cb'/>
- <parameter type-id='type-id-16' name='show_all'/>
- <parameter type-id='type-id-16' name='ordered'/>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_version_userland' mangled-name='zfs_version_userland' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_userland'>
- <parameter type-id='type-id-17' name='version'/>
- <parameter type-id='type-id-8' name='len'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='zfs_version_print' mangled-name='zfs_version_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_print'>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_version_kernel' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='color_start' mangled-name='color_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_start'>
- <parameter type-id='type-id-17' name='color'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='color_end' mangled-name='color_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_end'>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='printf_color' mangled-name='printf_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='printf_color'>
- <parameter type-id='type-id-17' name='color'/>
- <parameter type-id='type-id-17' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-437'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_mount_os.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zfs_parse_mount_options' mangled-name='zfs_parse_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parse_mount_options'>
- <parameter type-id='type-id-17' name='mntopts'/>
- <parameter type-id='type-id-102' name='mntflags'/>
- <parameter type-id='type-id-102' name='zfsflags'/>
- <parameter type-id='type-id-8' name='sloppy'/>
- <parameter type-id='type-id-17' name='badopt'/>
- <parameter type-id='type-id-17' name='mtabopt'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_adjust_mount_options' mangled-name='zfs_adjust_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_adjust_mount_options'>
- <parameter type-id='type-id-135' name='zhp'/>
- <parameter type-id='type-id-84' name='mntpoint'/>
- <parameter type-id='type-id-17' name='mntopts'/>
- <parameter type-id='type-id-17' name='mtabopt'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='mount' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='umount2' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_mount_delegation_check' mangled-name='zfs_mount_delegation_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_delegation_check'>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='geteuid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_pool_os.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='efi_use_whole_disk' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='fsync' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zpool_label_disk' mangled-name='zpool_label_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-11' name='zhp'/>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <class-decl name='dk_gpt' size-in-bits='1920' is-struct='yes' visibility='default' id='type-id-440'>
+ <class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-202'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='efi_version' type-id='type-id-140' visibility='default'/>
+ <var-decl name='z_exportdata' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='efi_nparts' type-id='type-id-140' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='z_sharedata' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='z_sharetype' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='z_sharemax' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='zfs_share_t' type-id='type-id-202' id='type-id-195'/>
+ <class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='type-id-196'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_magic' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='efi_part_size' type-id='type-id-140' visibility='default'/>
+ <var-decl name='drr_versioninfo' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='efi_lbasize' type-id='type-id-140' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_creation_time' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_type' type-id='type-id-71' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='drr_flags' type-id='type-id-28' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='drr_toguid' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='drr_fromguid' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='drr_toname' type-id='type-id-17' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='type-id-203'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='zi_objset' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='zi_object' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='efi_last_lba' type-id='type-id-29' visibility='default'/>
+ <var-decl name='zi_start' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='efi_first_u_lba' type-id='type-id-29' visibility='default'/>
+ <var-decl name='zi_end' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='efi_last_u_lba' type-id='type-id-29' visibility='default'/>
+ <var-decl name='zi_guid' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='efi_disk_uguid' type-id='type-id-441' visibility='default'/>
+ <var-decl name='zi_level' type-id='type-id-28' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='zi_error' type-id='type-id-28' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='zi_type' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='efi_flags' type-id='type-id-140' visibility='default'/>
+ <var-decl name='zi_freq' type-id='type-id-28' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='efi_reserved1' type-id='type-id-140' visibility='default'/>
+ <var-decl name='zi_failfast' type-id='type-id-28' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='efi_altern_lba' type-id='type-id-29' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='efi_reserved' type-id='type-id-442' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='efi_parts' type-id='type-id-443' visibility='default'/>
+ <var-decl name='zi_func' type-id='type-id-17' visibility='default'/>
</data-member>
- </class-decl>
- <class-decl name='uuid' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-441'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='time_low' type-id='type-id-38' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2560'>
+ <var-decl name='zi_iotype' type-id='type-id-28' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='time_mid' type-id='type-id-444' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2592'>
+ <var-decl name='zi_duration' type-id='type-id-27' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='48'>
- <var-decl name='time_hi_and_version' type-id='type-id-444' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2624'>
+ <var-decl name='zi_timer' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='clock_seq_hi_and_reserved' type-id='type-id-79' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2688'>
+ <var-decl name='zi_nlanes' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='72'>
- <var-decl name='clock_seq_low' type-id='type-id-79' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2752'>
+ <var-decl name='zi_cmd' type-id='type-id-28' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='80'>
- <var-decl name='node_addr' type-id='type-id-392' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2784'>
+ <var-decl name='zi_dvas' type-id='type-id-28' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__uint16_t' type-id='type-id-152' id='type-id-445'/>
- <typedef-decl name='uint16_t' type-id='type-id-445' id='type-id-444'/>
+ <typedef-decl name='zinject_record_t' type-id='type-id-203' id='type-id-197'/>
- <array-type-def dimensions='1' type-id='type-id-140' size-in-bits='384' id='type-id-442'>
- <subrange length='12' type-id='type-id-33' id='type-id-391'/>
+ <array-type-def dimensions='1' type-id='type-id-72' size-in-bits='24' id='type-id-198'>
+ <subrange length='3' type-id='type-id-24' id='type-id-204'/>
</array-type-def>
- <class-decl name='dk_part' size-in-bits='960' is-struct='yes' visibility='default' id='type-id-446'>
+ <class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-205'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='p_start' type-id='type-id-29' visibility='default'/>
+ <var-decl name='zs_gen' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='p_size' type-id='type-id-29' visibility='default'/>
+ <var-decl name='zs_mode' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='p_guid' type-id='type-id-441' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='p_tag' type-id='type-id-447' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='272'>
- <var-decl name='p_flag' type-id='type-id-447' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='p_name' type-id='type-id-448' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='p_uguid' type-id='type-id-441' visibility='default'/>
+ <var-decl name='zs_links' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='p_resv' type-id='type-id-449' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='zs_ctime' type-id='type-id-206' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='ushort_t' type-id='type-id-152' id='type-id-447'/>
-
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='288' id='type-id-448'>
- <subrange length='36' type-id='type-id-33' id='type-id-450'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-140' size-in-bits='256' id='type-id-449'>
- <subrange length='8' type-id='type-id-33' id='type-id-390'/>
-
- </array-type-def>
- <array-type-def dimensions='1' type-id='type-id-446' size-in-bits='960' id='type-id-443'>
- <subrange length='1' type-id='type-id-33' id='type-id-160'/>
+ <array-type-def dimensions='1' type-id='type-id-7' size-in-bits='128' id='type-id-206'>
+ <subrange length='2' type-id='type-id-24' id='type-id-59'/>
</array-type-def>
- <pointer-type-def type-id='type-id-440' size-in-bits='64' id='type-id-451'/>
- <pointer-type-def type-id='type-id-451' size-in-bits='64' id='type-id-452'/>
- <function-decl name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-5'/>
- <parameter type-id='type-id-452'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='rand' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='efi_write' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-451'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='efi_rescan' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='efi_free' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-451'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zpool_label_disk_wait' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='efi_alloc_and_read' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-452'/>
- <return type-id='type-id-8'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_sendrecv_os.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='fcntl' mangled-name='fcntl64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-8'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_util_os.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <typedef-decl name='zfs_cmd_t' type-id='type-id-108' id='type-id-453'/>
- <pointer-type-def type-id='type-id-453' size-in-bits='64' id='type-id-454'/>
+ <typedef-decl name='zfs_stat_t' type-id='type-id-205' id='type-id-199'/>
+ <typedef-decl name='zfs_cmd_t' type-id='type-id-192' id='type-id-207'/>
+ <pointer-type-def type-id='type-id-207' size-in-bits='64' id='type-id-208'/>
<function-decl name='zfs_ioctl' mangled-name='zfs_ioctl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_ioctl'>
- <parameter type-id='type-id-10' name='hdl'/>
- <parameter type-id='type-id-8' name='request'/>
- <parameter type-id='type-id-454' name='zc'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='libzfs_error_init' mangled-name='libzfs_error_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_init'>
- <parameter type-id='type-id-8' name='error'/>
- <return type-id='type-id-84'/>
+ <parameter type-id='type-id-16' name='hdl'/>
+ <parameter type-id='type-id-2' name='request'/>
+ <parameter type-id='type-id-208' name='zc'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='access' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='clock_gettime' mangled-name='clock_gettime' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-425' size-in-bits='64' id='type-id-455'/>
- <function-decl name='clock_gettime' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-455'/>
- <return type-id='type-id-8'/>
+ <function-decl name='sched_yield' mangled-name='sched_yield' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='usleep' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-8'/>
+ <function-decl name='usleep' mangled-name='usleep' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='sched_yield' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-8'/>
+ <function-decl name='access' mangled-name='access' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/icp/algs/sha2/sha2.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='htonl' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <abi-instr version='1.0' address-size='64' path='../../module/icp/algs/sha2/sha2.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='htonl' mangled-name='htonl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/cityhash.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/cityhash.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
<function-decl name='cityhash4' mangled-name='cityhash4' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='cityhash4'>
- <parameter type-id='type-id-22' name='w1'/>
- <parameter type-id='type-id-22' name='w2'/>
- <parameter type-id='type-id-22' name='w3'/>
- <parameter type-id='type-id-22' name='w4'/>
- <return type-id='type-id-22'/>
+ <parameter type-id='type-id-7' name='w1'/>
+ <parameter type-id='type-id-7' name='w2'/>
+ <parameter type-id='type-id-7' name='w3'/>
+ <parameter type-id='type-id-7' name='w4'/>
+ <return type-id='type-id-7'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfeature_common.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='zfeature_checks_disable' type-id='type-id-16' mangled-name='zfeature_checks_disable' visibility='default' elf-symbol-id='zfeature_checks_disable'/>
- <class-decl name='zfeature_info' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-456'>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfeature_common.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <var-decl name='zfeature_checks_disable' type-id='type-id-9' mangled-name='zfeature_checks_disable' visibility='default' elf-symbol-id='zfeature_checks_disable'/>
+ <class-decl name='zfeature_info' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-209'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='fi_feature' type-id='type-id-457' visibility='default'/>
+ <var-decl name='fi_feature' type-id='type-id-210' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='fi_uname' type-id='type-id-84' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='fi_guid' type-id='type-id-84' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='fi_desc' type-id='type-id-84' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='fi_flags' type-id='type-id-458' visibility='default'/>
+ <var-decl name='fi_flags' type-id='type-id-211' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='fi_zfs_mod_supported' type-id='type-id-16' visibility='default'/>
+ <var-decl name='fi_zfs_mod_supported' type-id='type-id-9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='fi_type' type-id='type-id-459' visibility='default'/>
+ <var-decl name='fi_type' type-id='type-id-212' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='fi_depends' type-id='type-id-460' visibility='default'/>
+ <var-decl name='fi_depends' type-id='type-id-213' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='spa_feature_t' type-id='type-id-328' id='type-id-457'/>
- <enum-decl name='zfeature_flags' id='type-id-461'>
- <underlying-type type-id='type-id-49'/>
+ <enum-decl name='spa_feature' id='type-id-214'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='SPA_FEATURE_NONE' value='-1'/>
+ <enumerator name='SPA_FEATURE_ASYNC_DESTROY' value='0'/>
+ <enumerator name='SPA_FEATURE_EMPTY_BPOBJ' value='1'/>
+ <enumerator name='SPA_FEATURE_LZ4_COMPRESS' value='2'/>
+ <enumerator name='SPA_FEATURE_MULTI_VDEV_CRASH_DUMP' value='3'/>
+ <enumerator name='SPA_FEATURE_SPACEMAP_HISTOGRAM' value='4'/>
+ <enumerator name='SPA_FEATURE_ENABLED_TXG' value='5'/>
+ <enumerator name='SPA_FEATURE_HOLE_BIRTH' value='6'/>
+ <enumerator name='SPA_FEATURE_EXTENSIBLE_DATASET' value='7'/>
+ <enumerator name='SPA_FEATURE_EMBEDDED_DATA' value='8'/>
+ <enumerator name='SPA_FEATURE_BOOKMARKS' value='9'/>
+ <enumerator name='SPA_FEATURE_FS_SS_LIMIT' value='10'/>
+ <enumerator name='SPA_FEATURE_LARGE_BLOCKS' value='11'/>
+ <enumerator name='SPA_FEATURE_LARGE_DNODE' value='12'/>
+ <enumerator name='SPA_FEATURE_SHA512' value='13'/>
+ <enumerator name='SPA_FEATURE_SKEIN' value='14'/>
+ <enumerator name='SPA_FEATURE_EDONR' value='15'/>
+ <enumerator name='SPA_FEATURE_USEROBJ_ACCOUNTING' value='16'/>
+ <enumerator name='SPA_FEATURE_ENCRYPTION' value='17'/>
+ <enumerator name='SPA_FEATURE_PROJECT_QUOTA' value='18'/>
+ <enumerator name='SPA_FEATURE_DEVICE_REMOVAL' value='19'/>
+ <enumerator name='SPA_FEATURE_OBSOLETE_COUNTS' value='20'/>
+ <enumerator name='SPA_FEATURE_POOL_CHECKPOINT' value='21'/>
+ <enumerator name='SPA_FEATURE_SPACEMAP_V2' value='22'/>
+ <enumerator name='SPA_FEATURE_ALLOCATION_CLASSES' value='23'/>
+ <enumerator name='SPA_FEATURE_RESILVER_DEFER' value='24'/>
+ <enumerator name='SPA_FEATURE_BOOKMARK_V2' value='25'/>
+ <enumerator name='SPA_FEATURE_REDACTION_BOOKMARKS' value='26'/>
+ <enumerator name='SPA_FEATURE_REDACTED_DATASETS' value='27'/>
+ <enumerator name='SPA_FEATURE_BOOKMARK_WRITTEN' value='28'/>
+ <enumerator name='SPA_FEATURE_LOG_SPACEMAP' value='29'/>
+ <enumerator name='SPA_FEATURE_LIVELIST' value='30'/>
+ <enumerator name='SPA_FEATURE_DEVICE_REBUILD' value='31'/>
+ <enumerator name='SPA_FEATURE_ZSTD_COMPRESS' value='32'/>
+ <enumerator name='SPA_FEATURE_DRAID' value='33'/>
+ <enumerator name='SPA_FEATURES' value='34'/>
+ </enum-decl>
+ <typedef-decl name='spa_feature_t' type-id='type-id-214' id='type-id-210'/>
+ <enum-decl name='zfeature_flags' id='type-id-215'>
+ <underlying-type type-id='type-id-41'/>
<enumerator name='ZFEATURE_FLAG_READONLY_COMPAT' value='1'/>
<enumerator name='ZFEATURE_FLAG_MOS' value='2'/>
<enumerator name='ZFEATURE_FLAG_ACTIVATE_ON_ENABLE' value='4'/>
<enumerator name='ZFEATURE_FLAG_PER_DATASET' value='8'/>
</enum-decl>
- <typedef-decl name='zfeature_flags_t' type-id='type-id-461' id='type-id-458'/>
- <enum-decl name='zfeature_type' id='type-id-462'>
- <underlying-type type-id='type-id-49'/>
+ <typedef-decl name='zfeature_flags_t' type-id='type-id-215' id='type-id-211'/>
+ <enum-decl name='zfeature_type' id='type-id-216'>
+ <underlying-type type-id='type-id-41'/>
<enumerator name='ZFEATURE_TYPE_BOOLEAN' value='0'/>
<enumerator name='ZFEATURE_TYPE_UINT64_ARRAY' value='1'/>
<enumerator name='ZFEATURE_NUM_TYPES' value='2'/>
</enum-decl>
- <typedef-decl name='zfeature_type_t' type-id='type-id-462' id='type-id-459'/>
- <qualified-type-def type-id='type-id-457' const='yes' id='type-id-463'/>
- <pointer-type-def type-id='type-id-463' size-in-bits='64' id='type-id-460'/>
- <typedef-decl name='zfeature_info_t' type-id='type-id-456' id='type-id-464'/>
+ <typedef-decl name='zfeature_type_t' type-id='type-id-216' id='type-id-212'/>
+ <qualified-type-def type-id='type-id-210' const='yes' id='type-id-217'/>
+ <pointer-type-def type-id='type-id-217' size-in-bits='64' id='type-id-213'/>
+ <typedef-decl name='zfeature_info_t' type-id='type-id-209' id='type-id-218'/>
- <array-type-def dimensions='1' type-id='type-id-464' size-in-bits='15232' id='type-id-465'>
- <subrange length='34' type-id='type-id-33' id='type-id-395'/>
+ <array-type-def dimensions='1' type-id='type-id-218' size-in-bits='15232' id='type-id-219'>
+ <subrange length='34' type-id='type-id-24' id='type-id-220'/>
</array-type-def>
- <var-decl name='spa_feature_table' type-id='type-id-465' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
- <function-decl name='zfeature_is_valid_guid' mangled-name='zfeature_is_valid_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_valid_guid'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-16'/>
+ <var-decl name='spa_feature_table' type-id='type-id-219' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
+ <function-decl name='zfeature_depends_on' mangled-name='zfeature_depends_on' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_depends_on'>
+ <parameter type-id='type-id-210' name='fid'/>
+ <parameter type-id='type-id-210' name='check'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfeature_is_supported' mangled-name='zfeature_is_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_supported'>
- <parameter type-id='type-id-84' name='guid'/>
- <return type-id='type-id-16'/>
+ <pointer-type-def type-id='type-id-210' size-in-bits='64' id='type-id-221'/>
+ <function-decl name='zfeature_lookup_name' mangled-name='zfeature_lookup_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_name'>
+ <parameter type-id='type-id-84' name='name'/>
+ <parameter type-id='type-id-221' name='res'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <pointer-type-def type-id='type-id-457' size-in-bits='64' id='type-id-466'/>
<function-decl name='zfeature_lookup_guid' mangled-name='zfeature_lookup_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_guid'>
- <parameter type-id='type-id-84' name='guid'/>
- <parameter type-id='type-id-466' name='res'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-84' name='name'/>
+ <parameter type-id='type-id-221' name='res'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfeature_lookup_name' mangled-name='zfeature_lookup_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_name'>
+ <function-decl name='zfeature_is_supported' mangled-name='zfeature_is_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_supported'>
<parameter type-id='type-id-84' name='guid'/>
- <parameter type-id='type-id-466' name='res'/>
- <return type-id='type-id-8'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfeature_depends_on' mangled-name='zfeature_depends_on' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_depends_on'>
- <parameter type-id='type-id-457' name='fid'/>
- <parameter type-id='type-id-457' name='check'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zfeature_is_valid_guid' mangled-name='zfeature_is_valid_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_valid_guid'>
+ <parameter type-id='type-id-84' name='name'/>
+ <return type-id='type-id-9'/>
</function-decl>
<function-decl name='zfs_mod_supported' mangled-name='zfs_mod_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mod_supported'>
<parameter type-id='type-id-84' name='scope'/>
<parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-16'/>
+ <return type-id='type-id-9'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_comutil.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_comutil.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <array-type-def dimensions='1' type-id='type-id-84' size-in-bits='2624' id='type-id-467'>
- <subrange length='41' type-id='type-id-33' id='type-id-468'/>
+ <array-type-def dimensions='1' type-id='type-id-84' size-in-bits='2624' id='type-id-222'>
+ <subrange length='41' type-id='type-id-24' id='type-id-223'/>
</array-type-def>
- <var-decl name='zfs_history_event_names' type-id='type-id-467' mangled-name='zfs_history_event_names' visibility='default' elf-symbol-id='zfs_history_event_names'/>
- <function-decl name='zfs_allocatable_devs' mangled-name='zfs_allocatable_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_allocatable_devs'>
- <parameter type-id='type-id-15' name='nv'/>
- <return type-id='type-id-16'/>
+ <var-decl name='zfs_history_event_names' type-id='type-id-222' mangled-name='zfs_history_event_names' visibility='default' elf-symbol-id='zfs_history_event_names'/>
+ <function-decl name='zfs_dataset_name_hidden' mangled-name='zfs_dataset_name_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_name_hidden'>
+ <parameter type-id='type-id-84' name='name'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_special_devs' mangled-name='zfs_special_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_special_devs'>
- <parameter type-id='type-id-15' name='nv'/>
- <parameter type-id='type-id-17' name='type'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zfs_spa_version_map' mangled-name='zfs_spa_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version_map'>
+ <parameter type-id='type-id-2' name='zpl_version'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <typedef-decl name='zpool_load_policy_t' type-id='type-id-330' id='type-id-469'/>
- <pointer-type-def type-id='type-id-469' size-in-bits='64' id='type-id-470'/>
- <function-decl name='zpool_get_load_policy' mangled-name='zpool_get_load_policy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_load_policy'>
- <parameter type-id='type-id-15' name='nvl'/>
- <parameter type-id='type-id-470' name='zlpp'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_zpl_version_map' mangled-name='zfs_zpl_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_zpl_version_map'>
+ <parameter type-id='type-id-2' name='zpl_version'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-133'/>
- <parameter type-id='type-id-244'/>
- <return type-id='type-id-8'/>
+ <class-decl name='zpool_load_policy' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-224'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='zlp_rewind' type-id='type-id-28' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='zlp_maxmeta' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='zlp_maxdata' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='zlp_txg' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='zpool_load_policy_t' type-id='type-id-224' id='type-id-225'/>
+ <pointer-type-def type-id='type-id-225' size-in-bits='64' id='type-id-226'/>
+ <function-decl name='zpool_get_load_policy' mangled-name='zpool_get_load_policy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_load_policy'>
+ <parameter type-id='type-id-19' name='nvl'/>
+ <parameter type-id='type-id-226' name='zlpp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_zpl_version_map' mangled-name='zfs_zpl_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_zpl_version_map'>
- <parameter type-id='type-id-8' name='spa_version'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_special_devs' mangled-name='zfs_special_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_special_devs'>
+ <parameter type-id='type-id-19' name='nv'/>
+ <parameter type-id='type-id-14' name='type'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_spa_version_map' mangled-name='zfs_spa_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version_map'>
- <parameter type-id='type-id-8' name='spa_version'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_allocatable_devs' mangled-name='zfs_allocatable_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_allocatable_devs'>
+ <parameter type-id='type-id-19' name='nv'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_dataset_name_hidden' mangled-name='zfs_dataset_name_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_name_hidden'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-16'/>
+ <function-decl name='nvpair_value_uint32' mangled-name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_deleg.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='zfs_deleg_perm_tab' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-471'>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_deleg.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <class-decl name='zfs_deleg_perm_tab' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-227'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='z_perm' type-id='type-id-17' visibility='default'/>
+ <var-decl name='z_perm' type-id='type-id-14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='z_note' type-id='type-id-472' visibility='default'/>
+ <var-decl name='z_note' type-id='type-id-228' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-473'>
- <underlying-type type-id='type-id-49'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-229'>
+ <underlying-type type-id='type-id-41'/>
<enumerator name='ZFS_DELEG_NOTE_CREATE' value='0'/>
<enumerator name='ZFS_DELEG_NOTE_DESTROY' value='1'/>
<enumerator name='ZFS_DELEG_NOTE_SNAPSHOT' value='2'/>
<enumerator name='ZFS_DELEG_NOTE_ROLLBACK' value='3'/>
<enumerator name='ZFS_DELEG_NOTE_CLONE' value='4'/>
<enumerator name='ZFS_DELEG_NOTE_PROMOTE' value='5'/>
<enumerator name='ZFS_DELEG_NOTE_RENAME' value='6'/>
<enumerator name='ZFS_DELEG_NOTE_SEND' value='7'/>
<enumerator name='ZFS_DELEG_NOTE_RECEIVE' value='8'/>
<enumerator name='ZFS_DELEG_NOTE_ALLOW' value='9'/>
<enumerator name='ZFS_DELEG_NOTE_USERPROP' value='10'/>
<enumerator name='ZFS_DELEG_NOTE_MOUNT' value='11'/>
<enumerator name='ZFS_DELEG_NOTE_SHARE' value='12'/>
<enumerator name='ZFS_DELEG_NOTE_USERQUOTA' value='13'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPQUOTA' value='14'/>
<enumerator name='ZFS_DELEG_NOTE_USERUSED' value='15'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPUSED' value='16'/>
<enumerator name='ZFS_DELEG_NOTE_USEROBJQUOTA' value='17'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPOBJQUOTA' value='18'/>
<enumerator name='ZFS_DELEG_NOTE_USEROBJUSED' value='19'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPOBJUSED' value='20'/>
<enumerator name='ZFS_DELEG_NOTE_HOLD' value='21'/>
<enumerator name='ZFS_DELEG_NOTE_RELEASE' value='22'/>
<enumerator name='ZFS_DELEG_NOTE_DIFF' value='23'/>
<enumerator name='ZFS_DELEG_NOTE_BOOKMARK' value='24'/>
<enumerator name='ZFS_DELEG_NOTE_LOAD_KEY' value='25'/>
<enumerator name='ZFS_DELEG_NOTE_CHANGE_KEY' value='26'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTUSED' value='27'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTQUOTA' value='28'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTOBJUSED' value='29'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTOBJQUOTA' value='30'/>
<enumerator name='ZFS_DELEG_NOTE_NONE' value='31'/>
</enum-decl>
- <typedef-decl name='zfs_deleg_note_t' type-id='type-id-473' id='type-id-472'/>
- <typedef-decl name='zfs_deleg_perm_tab_t' type-id='type-id-471' id='type-id-474'/>
+ <typedef-decl name='zfs_deleg_note_t' type-id='type-id-229' id='type-id-228'/>
+ <typedef-decl name='zfs_deleg_perm_tab_t' type-id='type-id-227' id='type-id-230'/>
- <array-type-def dimensions='1' type-id='type-id-474' size-in-bits='4096' id='type-id-475'>
- <subrange length='32' type-id='type-id-33' id='type-id-208'/>
+ <array-type-def dimensions='1' type-id='type-id-230' size-in-bits='128' id='type-id-231'>
+ <subrange length='1' id='type-id-232'/>
</array-type-def>
- <var-decl name='zfs_deleg_perm_tab' type-id='type-id-475' mangled-name='zfs_deleg_perm_tab' visibility='default' elf-symbol-id='zfs_deleg_perm_tab'/>
- <function-decl name='zfs_deleg_canonicalize_perm' mangled-name='zfs_deleg_canonicalize_perm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_canonicalize_perm'>
- <parameter type-id='type-id-84' name='perm'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_prop_delegatable' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-50'/>
- </function-decl>
- <function-decl name='zfs_deleg_verify_nvlist' mangled-name='zfs_deleg_verify_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_verify_nvlist'>
- <parameter type-id='type-id-15' name='nvp'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='permset_namecheck' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-213'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-476'>
- <underlying-type type-id='type-id-49'/>
+ <var-decl name='zfs_deleg_perm_tab' type-id='type-id-231' mangled-name='zfs_deleg_perm_tab' visibility='default' elf-symbol-id='zfs_deleg_perm_tab'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-233'>
+ <underlying-type type-id='type-id-41'/>
<enumerator name='ZFS_DELEG_WHO_UNKNOWN' value='0'/>
<enumerator name='ZFS_DELEG_USER' value='117'/>
<enumerator name='ZFS_DELEG_USER_SETS' value='85'/>
<enumerator name='ZFS_DELEG_GROUP' value='103'/>
<enumerator name='ZFS_DELEG_GROUP_SETS' value='71'/>
<enumerator name='ZFS_DELEG_EVERYONE' value='101'/>
<enumerator name='ZFS_DELEG_EVERYONE_SETS' value='69'/>
<enumerator name='ZFS_DELEG_CREATE' value='99'/>
<enumerator name='ZFS_DELEG_CREATE_SETS' value='67'/>
<enumerator name='ZFS_DELEG_NAMED_SET' value='115'/>
<enumerator name='ZFS_DELEG_NAMED_SET_SETS' value='83'/>
</enum-decl>
- <typedef-decl name='zfs_deleg_who_type_t' type-id='type-id-476' id='type-id-477'/>
+ <typedef-decl name='zfs_deleg_who_type_t' type-id='type-id-233' id='type-id-234'/>
<function-decl name='zfs_deleg_whokey' mangled-name='zfs_deleg_whokey' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_whokey'>
- <parameter type-id='type-id-17' name='attr'/>
- <parameter type-id='type-id-477' name='type'/>
- <parameter type-id='type-id-32' name='inheritchr'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='type-id-14' name='attr'/>
+ <parameter type-id='type-id-234' name='type'/>
+ <parameter type-id='type-id-23' name='inheritchr'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_deleg_verify_nvlist' mangled-name='zfs_deleg_verify_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_verify_nvlist'>
+ <parameter type-id='type-id-19' name='nvp'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zfs_deleg_canonicalize_perm' mangled-name='zfs_deleg_canonicalize_perm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_canonicalize_perm'>
+ <parameter type-id='type-id-84' name='perm'/>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='permset_namecheck' mangled-name='permset_namecheck' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='zfs_prop_delegatable' mangled-name='zfs_prop_delegatable' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='zio_abd_checksum_func' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-478'>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <class-decl name='zio_abd_checksum_func' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-235'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='acf_init' type-id='type-id-479' visibility='default'/>
+ <var-decl name='acf_init' type-id='type-id-236' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='acf_fini' type-id='type-id-480' visibility='default'/>
+ <var-decl name='acf_fini' type-id='type-id-237' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='acf_iter' type-id='type-id-481' visibility='default'/>
+ <var-decl name='acf_iter' type-id='type-id-238' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='zio_abd_checksum_data' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-482'>
+ <class-decl name='zio_abd_checksum_data' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-239'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='acd_byteorder' type-id='type-id-483' visibility='default'/>
+ <var-decl name='acd_byteorder' type-id='type-id-240' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='acd_ctx' type-id='type-id-484' visibility='default'/>
+ <var-decl name='acd_ctx' type-id='type-id-241' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='acd_zcp' type-id='type-id-485' visibility='default'/>
+ <var-decl name='acd_zcp' type-id='type-id-242' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='acd_private' type-id='type-id-7' visibility='default'/>
+ <var-decl name='acd_private' type-id='type-id-13' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-486'>
- <underlying-type type-id='type-id-49'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-243'>
+ <underlying-type type-id='type-id-41'/>
<enumerator name='ZIO_CHECKSUM_NATIVE' value='0'/>
<enumerator name='ZIO_CHECKSUM_BYTESWAP' value='1'/>
</enum-decl>
- <typedef-decl name='zio_byteorder_t' type-id='type-id-486' id='type-id-483'/>
- <union-decl name='fletcher_4_ctx' size-in-bits='2048' visibility='default' id='type-id-487'>
+ <typedef-decl name='zio_byteorder_t' type-id='type-id-243' id='type-id-240'/>
+ <union-decl name='fletcher_4_ctx' size-in-bits='2048' visibility='default' id='type-id-244'>
<data-member access='private'>
- <var-decl name='scalar' type-id='type-id-380' visibility='default'/>
+ <var-decl name='scalar' type-id='type-id-245' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='superscalar' type-id='type-id-488' visibility='default'/>
+ <var-decl name='superscalar' type-id='type-id-246' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='sse' type-id='type-id-489' visibility='default'/>
+ <var-decl name='sse' type-id='type-id-247' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='avx' type-id='type-id-490' visibility='default'/>
+ <var-decl name='avx' type-id='type-id-248' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='avx512' type-id='type-id-491' visibility='default'/>
+ <var-decl name='avx512' type-id='type-id-249' visibility='default'/>
</data-member>
</union-decl>
- <class-decl name='zfs_fletcher_superscalar' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-492'>
+ <class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-250'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='zc_word' type-id='type-id-251' visibility='default'/>
+ </data-member>
+ </class-decl>
+
+ <array-type-def dimensions='1' type-id='type-id-7' size-in-bits='256' id='type-id-251'>
+ <subrange length='4' type-id='type-id-24' id='type-id-252'/>
+
+ </array-type-def>
+ <typedef-decl name='zio_cksum_t' type-id='type-id-250' id='type-id-245'/>
+ <class-decl name='zfs_fletcher_superscalar' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-253'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='v' type-id='type-id-359' visibility='default'/>
+ <var-decl name='v' type-id='type-id-251' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='zfs_fletcher_superscalar_t' type-id='type-id-492' id='type-id-493'/>
+ <typedef-decl name='zfs_fletcher_superscalar_t' type-id='type-id-253' id='type-id-254'/>
- <array-type-def dimensions='1' type-id='type-id-493' size-in-bits='1024' id='type-id-488'>
- <subrange length='4' type-id='type-id-33' id='type-id-217'/>
+ <array-type-def dimensions='1' type-id='type-id-254' size-in-bits='1024' id='type-id-246'>
+ <subrange length='4' type-id='type-id-24' id='type-id-252'/>
</array-type-def>
- <class-decl name='zfs_fletcher_sse' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-494'>
+ <class-decl name='zfs_fletcher_sse' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-255'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='v' type-id='type-id-121' visibility='default'/>
+ <var-decl name='v' type-id='type-id-206' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='zfs_fletcher_sse_t' type-id='type-id-494' id='type-id-495'/>
+ <typedef-decl name='zfs_fletcher_sse_t' type-id='type-id-255' id='type-id-256'/>
- <array-type-def dimensions='1' type-id='type-id-495' size-in-bits='512' id='type-id-489'>
- <subrange length='4' type-id='type-id-33' id='type-id-217'/>
+ <array-type-def dimensions='1' type-id='type-id-256' size-in-bits='512' id='type-id-247'>
+ <subrange length='4' type-id='type-id-24' id='type-id-252'/>
</array-type-def>
- <class-decl name='zfs_fletcher_avx' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-496'>
+ <class-decl name='zfs_fletcher_avx' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-257'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='v' type-id='type-id-359' visibility='default'/>
+ <var-decl name='v' type-id='type-id-251' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='zfs_fletcher_avx_t' type-id='type-id-496' id='type-id-497'/>
+ <typedef-decl name='zfs_fletcher_avx_t' type-id='type-id-257' id='type-id-258'/>
- <array-type-def dimensions='1' type-id='type-id-497' size-in-bits='1024' id='type-id-490'>
- <subrange length='4' type-id='type-id-33' id='type-id-217'/>
+ <array-type-def dimensions='1' type-id='type-id-258' size-in-bits='1024' id='type-id-248'>
+ <subrange length='4' type-id='type-id-24' id='type-id-252'/>
</array-type-def>
- <class-decl name='zfs_fletcher_avx512' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-498'>
+ <class-decl name='zfs_fletcher_avx512' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-259'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='v' type-id='type-id-499' visibility='default'/>
+ <var-decl name='v' type-id='type-id-260' visibility='default'/>
</data-member>
</class-decl>
- <array-type-def dimensions='1' type-id='type-id-22' size-in-bits='512' id='type-id-499'>
- <subrange length='8' type-id='type-id-33' id='type-id-390'/>
+ <array-type-def dimensions='1' type-id='type-id-7' size-in-bits='512' id='type-id-260'>
+ <subrange length='8' type-id='type-id-24' id='type-id-261'/>
</array-type-def>
- <typedef-decl name='zfs_fletcher_avx512_t' type-id='type-id-498' id='type-id-500'/>
+ <typedef-decl name='zfs_fletcher_avx512_t' type-id='type-id-259' id='type-id-262'/>
- <array-type-def dimensions='1' type-id='type-id-500' size-in-bits='2048' id='type-id-491'>
- <subrange length='4' type-id='type-id-33' id='type-id-217'/>
+ <array-type-def dimensions='1' type-id='type-id-262' size-in-bits='2048' id='type-id-249'>
+ <subrange length='4' type-id='type-id-24' id='type-id-252'/>
</array-type-def>
- <typedef-decl name='fletcher_4_ctx_t' type-id='type-id-487' id='type-id-501'/>
- <pointer-type-def type-id='type-id-501' size-in-bits='64' id='type-id-484'/>
- <pointer-type-def type-id='type-id-380' size-in-bits='64' id='type-id-485'/>
- <typedef-decl name='zio_abd_checksum_data_t' type-id='type-id-482' id='type-id-502'/>
- <pointer-type-def type-id='type-id-502' size-in-bits='64' id='type-id-503'/>
- <typedef-decl name='zio_abd_checksum_init_t' type-id='type-id-504' id='type-id-505'/>
- <pointer-type-def type-id='type-id-505' size-in-bits='64' id='type-id-479'/>
- <typedef-decl name='zio_abd_checksum_fini_t' type-id='type-id-504' id='type-id-506'/>
- <pointer-type-def type-id='type-id-506' size-in-bits='64' id='type-id-480'/>
- <typedef-decl name='zio_abd_checksum_iter_t' type-id='type-id-507' id='type-id-508'/>
- <pointer-type-def type-id='type-id-508' size-in-bits='64' id='type-id-481'/>
- <qualified-type-def type-id='type-id-478' const='yes' id='type-id-509'/>
- <typedef-decl name='zio_abd_checksum_func_t' type-id='type-id-509' id='type-id-510'/>
- <var-decl name='fletcher_4_abd_ops' type-id='type-id-510' mangled-name='fletcher_4_abd_ops' visibility='default' elf-symbol-id='fletcher_4_abd_ops'/>
- <function-decl name='fletcher_init' mangled-name='fletcher_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_init'>
- <parameter type-id='type-id-485' name='zcp'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='fletcher_2_incremental_native' mangled-name='fletcher_2_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_native'>
- <parameter type-id='type-id-7' name='buf'/>
- <parameter type-id='type-id-28' name='size'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <typedef-decl name='fletcher_4_ctx_t' type-id='type-id-244' id='type-id-263'/>
+ <pointer-type-def type-id='type-id-263' size-in-bits='64' id='type-id-241'/>
+ <pointer-type-def type-id='type-id-245' size-in-bits='64' id='type-id-242'/>
+ <typedef-decl name='zio_abd_checksum_data_t' type-id='type-id-239' id='type-id-264'/>
+ <pointer-type-def type-id='type-id-264' size-in-bits='64' id='type-id-265'/>
+ <typedef-decl name='zio_abd_checksum_init_t' type-id='type-id-266' id='type-id-267'/>
+ <pointer-type-def type-id='type-id-267' size-in-bits='64' id='type-id-236'/>
+ <typedef-decl name='zio_abd_checksum_fini_t' type-id='type-id-266' id='type-id-268'/>
+ <pointer-type-def type-id='type-id-268' size-in-bits='64' id='type-id-237'/>
+ <typedef-decl name='zio_abd_checksum_iter_t' type-id='type-id-269' id='type-id-270'/>
+ <pointer-type-def type-id='type-id-270' size-in-bits='64' id='type-id-238'/>
+ <qualified-type-def type-id='type-id-235' const='yes' id='type-id-271'/>
+ <typedef-decl name='zio_abd_checksum_func_t' type-id='type-id-271' id='type-id-272'/>
+ <var-decl name='fletcher_4_abd_ops' type-id='type-id-272' mangled-name='fletcher_4_abd_ops' visibility='default' elf-symbol-id='fletcher_4_abd_ops'/>
+ <function-decl name='fletcher_4_incremental_byteswap' mangled-name='fletcher_4_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_byteswap'>
+ <parameter type-id='type-id-13' name='buf'/>
+ <parameter type-id='type-id-18' name='size'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='fletcher_2_native' mangled-name='fletcher_2_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_native'>
- <parameter type-id='type-id-7' name='buf'/>
- <parameter type-id='type-id-22' name='size'/>
- <parameter type-id='type-id-7' name='ctx_template'/>
- <parameter type-id='type-id-485' name='zcp'/>
- <return type-id='type-id-6'/>
+ <function-decl name='fletcher_4_native_varsize' mangled-name='fletcher_4_native_varsize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native_varsize'>
+ <parameter type-id='type-id-13' name='buf'/>
+ <parameter type-id='type-id-7' name='size'/>
+ <parameter type-id='type-id-242' name='zcp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fletcher_2_incremental_byteswap' mangled-name='fletcher_2_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_byteswap'>
- <parameter type-id='type-id-7' name='buf'/>
- <parameter type-id='type-id-28' name='size'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fletcher_4_impl_set' mangled-name='fletcher_4_impl_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_impl_set'>
+ <parameter type-id='type-id-84' name='val'/>
+ <return type-id='type-id-2'/>
</function-decl>
<function-decl name='fletcher_2_byteswap' mangled-name='fletcher_2_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_byteswap'>
- <parameter type-id='type-id-7' name='buf'/>
- <parameter type-id='type-id-22' name='size'/>
- <parameter type-id='type-id-7' name='ctx_template'/>
- <parameter type-id='type-id-485' name='zcp'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='type-id-13' name='buf'/>
+ <parameter type-id='type-id-7' name='size'/>
+ <parameter type-id='type-id-13' name='ctx_template'/>
+ <parameter type-id='type-id-242' name='zcp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fletcher_4_impl_set' mangled-name='fletcher_4_impl_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_impl_set'>
- <parameter type-id='type-id-84' name='val'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fletcher_2_incremental_byteswap' mangled-name='fletcher_2_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_byteswap'>
+ <parameter type-id='type-id-13' name='buf'/>
+ <parameter type-id='type-id-18' name='size'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <qualified-type-def type-id='type-id-5' volatile='yes' id='type-id-511'/>
- <pointer-type-def type-id='type-id-511' size-in-bits='64' id='type-id-512'/>
- <function-decl name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-512'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
+ <function-decl name='fletcher_2_native' mangled-name='fletcher_2_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_native'>
+ <parameter type-id='type-id-13' name='buf'/>
+ <parameter type-id='type-id-7' name='size'/>
+ <parameter type-id='type-id-13' name='ctx_template'/>
+ <parameter type-id='type-id-242' name='zcp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='membar_producer' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='fletcher_2_incremental_native' mangled-name='fletcher_2_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_native'>
+ <parameter type-id='type-id-13' name='buf'/>
+ <parameter type-id='type-id-18' name='size'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='fletcher_4_native' mangled-name='fletcher_4_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native'>
- <parameter type-id='type-id-7' name='buf'/>
- <parameter type-id='type-id-22' name='size'/>
- <parameter type-id='type-id-7' name='ctx_template'/>
- <parameter type-id='type-id-485' name='zcp'/>
- <return type-id='type-id-6'/>
+ <function-decl name='fletcher_init' mangled-name='fletcher_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_init'>
+ <parameter type-id='type-id-242' name='zcp'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fletcher_4_native_varsize' mangled-name='fletcher_4_native_varsize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native_varsize'>
- <parameter type-id='type-id-7' name='buf'/>
- <parameter type-id='type-id-22' name='size'/>
- <parameter type-id='type-id-485' name='zcp'/>
- <return type-id='type-id-6'/>
+ <function-decl name='fletcher_4_native' mangled-name='fletcher_4_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native'>
+ <parameter type-id='type-id-13' name='buf'/>
+ <parameter type-id='type-id-7' name='size'/>
+ <parameter type-id='type-id-13' name='ctx_template'/>
+ <parameter type-id='type-id-242' name='zcp'/>
+ <return type-id='type-id-1'/>
</function-decl>
<function-decl name='fletcher_4_byteswap' mangled-name='fletcher_4_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_byteswap'>
- <parameter type-id='type-id-7' name='buf'/>
- <parameter type-id='type-id-22' name='size'/>
- <parameter type-id='type-id-7' name='ctx_template'/>
- <parameter type-id='type-id-485' name='zcp'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='type-id-13' name='buf'/>
+ <parameter type-id='type-id-7' name='size'/>
+ <parameter type-id='type-id-13' name='ctx_template'/>
+ <parameter type-id='type-id-242' name='zcp'/>
+ <return type-id='type-id-1'/>
</function-decl>
<function-decl name='fletcher_4_incremental_native' mangled-name='fletcher_4_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_native'>
- <parameter type-id='type-id-7' name='buf'/>
- <parameter type-id='type-id-28' name='size'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-13' name='buf'/>
+ <parameter type-id='type-id-18' name='size'/>
+ <parameter type-id='type-id-13' name='data'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='fletcher_4_incremental_byteswap' mangled-name='fletcher_4_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_byteswap'>
- <parameter type-id='type-id-7' name='buf'/>
- <parameter type-id='type-id-28' name='size'/>
- <parameter type-id='type-id-7' name='data'/>
- <return type-id='type-id-8'/>
+ <function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-507'>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-28'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-8'/>
+ <function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='type-id-269'>
+ <parameter type-id='type-id-13'/>
+ <parameter type-id='type-id-18'/>
+ <parameter type-id='type-id-13'/>
+ <return type-id='type-id-2'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-504'>
- <parameter type-id='type-id-503'/>
- <return type-id='type-id-6'/>
+ <function-type size-in-bits='64' id='type-id-266'>
+ <parameter type-id='type-id-265'/>
+ <return type-id='type-id-1'/>
</function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_avx512.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='fletcher_4_func' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-513'>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_avx512.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <class-decl name='fletcher_4_func' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-273'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='init_native' type-id='type-id-514' visibility='default'/>
+ <var-decl name='init_native' type-id='type-id-274' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='fini_native' type-id='type-id-515' visibility='default'/>
+ <var-decl name='fini_native' type-id='type-id-275' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='compute_native' type-id='type-id-516' visibility='default'/>
+ <var-decl name='compute_native' type-id='type-id-276' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='init_byteswap' type-id='type-id-514' visibility='default'/>
+ <var-decl name='init_byteswap' type-id='type-id-274' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='fini_byteswap' type-id='type-id-515' visibility='default'/>
+ <var-decl name='fini_byteswap' type-id='type-id-275' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='compute_byteswap' type-id='type-id-516' visibility='default'/>
+ <var-decl name='compute_byteswap' type-id='type-id-276' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='valid' type-id='type-id-517' visibility='default'/>
+ <var-decl name='valid' type-id='type-id-277' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='name' type-id='type-id-84' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-518' size-in-bits='64' id='type-id-519'/>
- <typedef-decl name='fletcher_4_init_f' type-id='type-id-519' id='type-id-514'/>
- <pointer-type-def type-id='type-id-520' size-in-bits='64' id='type-id-521'/>
- <typedef-decl name='fletcher_4_fini_f' type-id='type-id-521' id='type-id-515'/>
- <pointer-type-def type-id='type-id-522' size-in-bits='64' id='type-id-523'/>
- <typedef-decl name='fletcher_4_compute_f' type-id='type-id-523' id='type-id-516'/>
- <pointer-type-def type-id='type-id-524' size-in-bits='64' id='type-id-517'/>
- <typedef-decl name='fletcher_4_ops_t' type-id='type-id-513' id='type-id-525'/>
- <qualified-type-def type-id='type-id-525' const='yes' id='type-id-526'/>
- <var-decl name='fletcher_4_avx512f_ops' type-id='type-id-526' mangled-name='fletcher_4_avx512f_ops' visibility='default' elf-symbol-id='fletcher_4_avx512f_ops'/>
- <var-decl name='fletcher_4_avx512bw_ops' type-id='type-id-526' mangled-name='fletcher_4_avx512bw_ops' visibility='default' elf-symbol-id='fletcher_4_avx512bw_ops'/>
- <function-type size-in-bits='64' id='type-id-524'>
- <return type-id='type-id-16'/>
+ <pointer-type-def type-id='type-id-278' size-in-bits='64' id='type-id-279'/>
+ <typedef-decl name='fletcher_4_init_f' type-id='type-id-279' id='type-id-274'/>
+ <pointer-type-def type-id='type-id-280' size-in-bits='64' id='type-id-281'/>
+ <typedef-decl name='fletcher_4_fini_f' type-id='type-id-281' id='type-id-275'/>
+ <pointer-type-def type-id='type-id-282' size-in-bits='64' id='type-id-283'/>
+ <typedef-decl name='fletcher_4_compute_f' type-id='type-id-283' id='type-id-276'/>
+ <pointer-type-def type-id='type-id-284' size-in-bits='64' id='type-id-277'/>
+ <typedef-decl name='fletcher_4_ops_t' type-id='type-id-273' id='type-id-285'/>
+ <qualified-type-def type-id='type-id-285' const='yes' id='type-id-286'/>
+ <var-decl name='fletcher_4_avx512f_ops' type-id='type-id-286' mangled-name='fletcher_4_avx512f_ops' visibility='default' elf-symbol-id='fletcher_4_avx512f_ops'/>
+ <var-decl name='fletcher_4_avx512bw_ops' type-id='type-id-286' mangled-name='fletcher_4_avx512bw_ops' visibility='default' elf-symbol-id='fletcher_4_avx512bw_ops'/>
+ <function-type size-in-bits='64' id='type-id-284'>
+ <return type-id='type-id-9'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-518'>
- <parameter type-id='type-id-484'/>
- <return type-id='type-id-6'/>
+ <function-type size-in-bits='64' id='type-id-278'>
+ <parameter type-id='type-id-241'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-522'>
- <parameter type-id='type-id-484'/>
+ <function-type size-in-bits='64' id='type-id-282'>
+ <parameter type-id='type-id-241'/>
+ <parameter type-id='type-id-13'/>
<parameter type-id='type-id-7'/>
- <parameter type-id='type-id-22'/>
- <return type-id='type-id-6'/>
+ <return type-id='type-id-1'/>
</function-type>
- <function-type size-in-bits='64' id='type-id-520'>
- <parameter type-id='type-id-484'/>
- <parameter type-id='type-id-485'/>
- <return type-id='type-id-6'/>
+ <function-type size-in-bits='64' id='type-id-280'>
+ <parameter type-id='type-id-241'/>
+ <parameter type-id='type-id-242'/>
+ <return type-id='type-id-1'/>
</function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_intel.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='fletcher_4_avx2_ops' type-id='type-id-526' mangled-name='fletcher_4_avx2_ops' visibility='default' elf-symbol-id='fletcher_4_avx2_ops'/>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_intel.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <var-decl name='fletcher_4_avx2_ops' type-id='type-id-286' mangled-name='fletcher_4_avx2_ops' visibility='default' elf-symbol-id='fletcher_4_avx2_ops'/>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_sse.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='fletcher_4_sse2_ops' type-id='type-id-526' mangled-name='fletcher_4_sse2_ops' visibility='default' elf-symbol-id='fletcher_4_sse2_ops'/>
- <var-decl name='fletcher_4_ssse3_ops' type-id='type-id-526' mangled-name='fletcher_4_ssse3_ops' visibility='default' elf-symbol-id='fletcher_4_ssse3_ops'/>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_sse.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <var-decl name='fletcher_4_sse2_ops' type-id='type-id-286' mangled-name='fletcher_4_sse2_ops' visibility='default' elf-symbol-id='fletcher_4_sse2_ops'/>
+ <var-decl name='fletcher_4_ssse3_ops' type-id='type-id-286' mangled-name='fletcher_4_ssse3_ops' visibility='default' elf-symbol-id='fletcher_4_ssse3_ops'/>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_superscalar.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='fletcher_4_superscalar_ops' type-id='type-id-526' mangled-name='fletcher_4_superscalar_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar_ops'/>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_superscalar.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <var-decl name='fletcher_4_superscalar_ops' type-id='type-id-286' mangled-name='fletcher_4_superscalar_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar_ops'/>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_superscalar4.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='fletcher_4_superscalar4_ops' type-id='type-id-526' mangled-name='fletcher_4_superscalar4_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar4_ops'/>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_superscalar4.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <var-decl name='fletcher_4_superscalar4_ops' type-id='type-id-286' mangled-name='fletcher_4_superscalar4_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar4_ops'/>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_namecheck.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='zfs_max_dataset_nesting' type-id='type-id-8' mangled-name='zfs_max_dataset_nesting' visibility='default' elf-symbol-id='zfs_max_dataset_nesting'/>
- <function-decl name='get_dataset_depth' mangled-name='get_dataset_depth' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_dataset_depth'>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-8'/>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_namecheck.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <var-decl name='zfs_max_dataset_nesting' type-id='type-id-2' mangled-name='zfs_max_dataset_nesting' visibility='default' elf-symbol-id='zfs_max_dataset_nesting'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-287'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='NAME_ERR_LEADING_SLASH' value='0'/>
+ <enumerator name='NAME_ERR_EMPTY_COMPONENT' value='1'/>
+ <enumerator name='NAME_ERR_TRAILING_SLASH' value='2'/>
+ <enumerator name='NAME_ERR_INVALCHAR' value='3'/>
+ <enumerator name='NAME_ERR_MULTIPLE_DELIMITERS' value='4'/>
+ <enumerator name='NAME_ERR_NOLETTER' value='5'/>
+ <enumerator name='NAME_ERR_RESERVED' value='6'/>
+ <enumerator name='NAME_ERR_DISKLIKE' value='7'/>
+ <enumerator name='NAME_ERR_TOOLONG' value='8'/>
+ <enumerator name='NAME_ERR_SELF_REF' value='9'/>
+ <enumerator name='NAME_ERR_PARENT_REF' value='10'/>
+ <enumerator name='NAME_ERR_NO_AT' value='11'/>
+ <enumerator name='NAME_ERR_NO_POUND' value='12'/>
+ </enum-decl>
+ <typedef-decl name='namecheck_err_t' type-id='type-id-287' id='type-id-288'/>
+ <pointer-type-def type-id='type-id-288' size-in-bits='64' id='type-id-289'/>
+ <function-decl name='pool_namecheck' mangled-name='pool_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='pool_namecheck'>
+ <parameter type-id='type-id-84' name='pool'/>
+ <parameter type-id='type-id-289' name='why'/>
+ <parameter type-id='type-id-14' name='what'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <typedef-decl name='namecheck_err_t' type-id='type-id-212' id='type-id-527'/>
- <pointer-type-def type-id='type-id-527' size-in-bits='64' id='type-id-528'/>
- <function-decl name='zfs_component_namecheck' mangled-name='zfs_component_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_component_namecheck'>
+ <function-decl name='mountpoint_namecheck' mangled-name='mountpoint_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mountpoint_namecheck'>
<parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-528' name='why'/>
- <parameter type-id='type-id-17' name='what'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-289' name='why'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='permset_namecheck' mangled-name='permset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='permset_namecheck'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-528' name='why'/>
- <parameter type-id='type-id-17' name='what'/>
- <return type-id='type-id-8'/>
+ <function-decl name='snapshot_namecheck' mangled-name='snapshot_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='snapshot_namecheck'>
+ <parameter type-id='type-id-84' name='pool'/>
+ <parameter type-id='type-id-289' name='why'/>
+ <parameter type-id='type-id-14' name='what'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='entity_namecheck' mangled-name='entity_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='entity_namecheck'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-528' name='why'/>
- <parameter type-id='type-id-17' name='what'/>
- <return type-id='type-id-8'/>
+ <function-decl name='bookmark_namecheck' mangled-name='bookmark_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='bookmark_namecheck'>
+ <parameter type-id='type-id-84' name='pool'/>
+ <parameter type-id='type-id-289' name='why'/>
+ <parameter type-id='type-id-14' name='what'/>
+ <return type-id='type-id-2'/>
</function-decl>
<function-decl name='dataset_namecheck' mangled-name='dataset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_namecheck'>
+ <parameter type-id='type-id-84' name='pool'/>
+ <parameter type-id='type-id-289' name='why'/>
+ <parameter type-id='type-id-14' name='what'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='dataset_nestcheck' mangled-name='dataset_nestcheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_nestcheck'>
<parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-528' name='why'/>
- <parameter type-id='type-id-17' name='what'/>
- <return type-id='type-id-8'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='bookmark_namecheck' mangled-name='bookmark_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='bookmark_namecheck'>
+ <function-decl name='permset_namecheck' mangled-name='permset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='permset_namecheck'>
<parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-528' name='why'/>
- <parameter type-id='type-id-17' name='what'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-289' name='why'/>
+ <parameter type-id='type-id-14' name='what'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='snapshot_namecheck' mangled-name='snapshot_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='snapshot_namecheck'>
+ <function-decl name='get_dataset_depth' mangled-name='get_dataset_depth' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_dataset_depth'>
<parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-528' name='why'/>
- <parameter type-id='type-id-17' name='what'/>
- <return type-id='type-id-8'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='mountpoint_namecheck' mangled-name='mountpoint_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mountpoint_namecheck'>
+ <function-decl name='zfs_component_namecheck' mangled-name='zfs_component_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_component_namecheck'>
<parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-528' name='why'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-289' name='why'/>
+ <parameter type-id='type-id-14' name='what'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='pool_namecheck' mangled-name='pool_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='pool_namecheck'>
+ <function-decl name='entity_namecheck' mangled-name='entity_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='entity_namecheck'>
<parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-528' name='why'/>
- <parameter type-id='type-id-17' name='what'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-289' name='why'/>
+ <parameter type-id='type-id-14' name='what'/>
+ <return type-id='type-id-2'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_prop.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_prop.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <array-type-def dimensions='1' type-id='type-id-84' size-in-bits='768' id='type-id-529'>
- <subrange length='12' type-id='type-id-33' id='type-id-391'/>
+ <array-type-def dimensions='1' type-id='type-id-84' size-in-bits='768' id='type-id-290'>
+ <subrange length='12' type-id='type-id-24' id='type-id-291'/>
</array-type-def>
- <var-decl name='zfs_userquota_prop_prefixes' type-id='type-id-529' mangled-name='zfs_userquota_prop_prefixes' visibility='default' elf-symbol-id='zfs_userquota_prop_prefixes'/>
- <function-decl name='zfs_prop_written' mangled-name='zfs_prop_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_written'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-16'/>
+ <var-decl name='zfs_userquota_prop_prefixes' type-id='type-id-290' mangled-name='zfs_userquota_prop_prefixes' visibility='default' elf-symbol-id='zfs_userquota_prop_prefixes'/>
+ <function-decl name='zfs_prop_align_right' mangled-name='zfs_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_align_right'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <typedef-decl name='zprop_desc_t' type-id='type-id-408' id='type-id-407'/>
- <pointer-type-def type-id='type-id-407' size-in-bits='64' id='type-id-530'/>
- <function-decl name='zfs_prop_get_table' mangled-name='zfs_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_table'>
- <return type-id='type-id-530'/>
+ <function-decl name='zfs_prop_column_name' mangled-name='zfs_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_column_name'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <qualified-type-def type-id='type-id-413' const='yes' id='type-id-531'/>
- <pointer-type-def type-id='type-id-531' size-in-bits='64' id='type-id-532'/>
- <function-decl name='zprop_register_index' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-412'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-532'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_prop_is_string' mangled-name='zfs_prop_is_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_is_string'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zprop_register_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-412'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_prop_values' mangled-name='zfs_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_values'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zprop_register_number' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-412'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_prop_valid_keylocation' mangled-name='zfs_prop_valid_keylocation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_keylocation'>
+ <parameter type-id='type-id-84' name='str'/>
+ <parameter type-id='type-id-9' name='encrypted'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zprop_register_hidden' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-234'/>
- <parameter type-id='type-id-412'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_prop_encryption_key_param' mangled-name='zfs_prop_encryption_key_param' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_encryption_key_param'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zprop_register_impl' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-234'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-412'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-50'/>
- <parameter type-id='type-id-532'/>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_prop_inheritable' mangled-name='zfs_prop_inheritable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inheritable'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_prop_delegatable' mangled-name='zfs_prop_delegatable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_delegatable'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zfs_prop_to_name' mangled-name='zfs_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_to_name'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_name_to_prop' mangled-name='zfs_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_to_prop'>
- <parameter type-id='type-id-84' name='propname'/>
- <return type-id='type-id-229'/>
+ <function-decl name='zfs_prop_default_numeric' mangled-name='zfs_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_numeric'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='zfs_prop_user' mangled-name='zfs_prop_user' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_user'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zfs_prop_default_string' mangled-name='zfs_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_string'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_prop_userquota' mangled-name='zfs_prop_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_userquota'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zfs_prop_setonce' mangled-name='zfs_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_setonce'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-9'/>
+ </function-decl>
+ <function-decl name='zfs_prop_visible' mangled-name='zfs_prop_visible' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_visible'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-9'/>
+ </function-decl>
+ <function-decl name='zfs_prop_readonly' mangled-name='zfs_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_readonly'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-9'/>
+ </function-decl>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-292'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='PROP_TYPE_NUMBER' value='0'/>
+ <enumerator name='PROP_TYPE_STRING' value='1'/>
+ <enumerator name='PROP_TYPE_INDEX' value='2'/>
+ </enum-decl>
+ <typedef-decl name='zprop_type_t' type-id='type-id-292' id='type-id-293'/>
+ <function-decl name='zfs_prop_get_type' mangled-name='zfs_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_type'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-293'/>
+ </function-decl>
+ <function-decl name='zfs_prop_valid_for_type' mangled-name='zfs_prop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_for_type'>
+ <parameter type-id='type-id-2' name='prop'/>
+ <parameter type-id='type-id-66' name='types'/>
+ <parameter type-id='type-id-9' name='headcheck'/>
+ <return type-id='type-id-9'/>
+ </function-decl>
+ <function-decl name='zfs_prop_random_value' mangled-name='zfs_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_random_value'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <parameter type-id='type-id-7' name='seed'/>
+ <return type-id='type-id-7'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-84' size-in-bits='64' id='type-id-294'/>
+ <function-decl name='zfs_prop_index_to_string' mangled-name='zfs_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_index_to_string'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <parameter type-id='type-id-7' name='index'/>
+ <parameter type-id='type-id-294' name='string'/>
+ <return type-id='type-id-2'/>
</function-decl>
<function-decl name='zfs_prop_string_to_index' mangled-name='zfs_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_string_to_index'>
- <parameter type-id='type-id-229' name='prop'/>
+ <parameter type-id='type-id-110' name='prop'/>
<parameter type-id='type-id-84' name='string'/>
- <parameter type-id='type-id-248' name='index'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-108' name='index'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zfs_prop_index_to_string' mangled-name='zfs_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_index_to_string'>
- <parameter type-id='type-id-229' name='prop'/>
- <parameter type-id='type-id-22' name='index'/>
- <parameter type-id='type-id-241' name='string'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zprop_index_to_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-241'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_prop_written' mangled-name='zfs_prop_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_written'>
+ <parameter type-id='type-id-84' name='name'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_prop_random_value' mangled-name='zfs_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_random_value'>
- <parameter type-id='type-id-229' name='prop'/>
- <parameter type-id='type-id-22' name='seed'/>
- <return type-id='type-id-22'/>
+ <function-decl name='zfs_prop_userquota' mangled-name='zfs_prop_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_userquota'>
+ <parameter type-id='type-id-84' name='name'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zprop_random_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-76'/>
- <return type-id='type-id-35'/>
+ <function-decl name='zfs_prop_user' mangled-name='zfs_prop_user' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_user'>
+ <parameter type-id='type-id-84' name='name'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_prop_valid_for_type' mangled-name='zfs_prop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_for_type'>
- <parameter type-id='type-id-8' name='prop'/>
- <parameter type-id='type-id-13' name='types'/>
- <parameter type-id='type-id-16' name='headcheck'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zfs_name_to_prop' mangled-name='zfs_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_to_prop'>
+ <parameter type-id='type-id-84' name='propname'/>
+ <return type-id='type-id-110'/>
</function-decl>
- <function-decl name='zfs_prop_get_type' mangled-name='zfs_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_type'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-409'/>
+ <function-decl name='zfs_prop_delegatable' mangled-name='zfs_prop_delegatable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_delegatable'>
+ <parameter type-id='type-id-110' name='prop'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_prop_readonly' mangled-name='zfs_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_readonly'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-16'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='704' is-struct='yes' is-anonymous='yes' naming-typedef-id='type-id-295' visibility='default' id='type-id-296'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='pd_name' type-id='type-id-84' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='pd_propnum' type-id='type-id-2' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='pd_proptype' type-id='type-id-293' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='pd_strdefault' type-id='type-id-84' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='pd_numdefault' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='pd_attr' type-id='type-id-297' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='pd_types' type-id='type-id-2' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='pd_values' type-id='type-id-84' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='pd_colname' type-id='type-id-84' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='pd_rightalign' type-id='type-id-9' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='480'>
+ <var-decl name='pd_visible' type-id='type-id-9' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='pd_zfs_mod_supported' type-id='type-id-9' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='pd_table' type-id='type-id-298' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='pd_table_size' type-id='type-id-18' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-299'>
+ <underlying-type type-id='type-id-41'/>
+ <enumerator name='PROP_DEFAULT' value='0'/>
+ <enumerator name='PROP_READONLY' value='1'/>
+ <enumerator name='PROP_INHERIT' value='2'/>
+ <enumerator name='PROP_ONETIME' value='3'/>
+ <enumerator name='PROP_ONETIME_DEFAULT' value='4'/>
+ </enum-decl>
+ <typedef-decl name='zprop_attr_t' type-id='type-id-299' id='type-id-297'/>
+ <class-decl name='zfs_index' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-300'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='pi_name' type-id='type-id-84' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='pi_value' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='zprop_index_t' type-id='type-id-300' id='type-id-301'/>
+ <qualified-type-def type-id='type-id-301' const='yes' id='type-id-302'/>
+ <pointer-type-def type-id='type-id-302' size-in-bits='64' id='type-id-298'/>
+ <typedef-decl name='zprop_desc_t' type-id='type-id-296' id='type-id-295'/>
+ <pointer-type-def type-id='type-id-295' size-in-bits='64' id='type-id-303'/>
+ <function-decl name='zfs_prop_get_table' mangled-name='zfs_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_table'>
+ <return type-id='type-id-303'/>
</function-decl>
- <function-decl name='zfs_prop_visible' mangled-name='zfs_prop_visible' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_visible'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zprop_random_value' mangled-name='zprop_random_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_setonce' mangled-name='zfs_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_setonce'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zprop_index_to_string' mangled-name='zprop_index_to_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_default_string' mangled-name='zfs_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_string'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zprop_register_index' mangled-name='zprop_register_index' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_default_numeric' mangled-name='zfs_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_numeric'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-22'/>
+ <function-decl name='zprop_register_string' mangled-name='zprop_register_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_to_name' mangled-name='zfs_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_to_name'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zprop_register_number' mangled-name='zprop_register_number' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_inheritable' mangled-name='zfs_prop_inheritable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inheritable'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zprop_register_hidden' mangled-name='zprop_register_hidden' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_encryption_key_param' mangled-name='zfs_prop_encryption_key_param' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_encryption_key_param'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zprop_register_impl' mangled-name='zprop_register_impl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zfs_prop_valid_keylocation' mangled-name='zfs_prop_valid_keylocation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_keylocation'>
- <parameter type-id='type-id-84' name='str'/>
- <parameter type-id='type-id-16' name='encrypted'/>
- <return type-id='type-id-16'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zpool_prop.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='zpool_prop_align_right' mangled-name='zpool_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_align_right'>
+ <parameter type-id='type-id-160' name='prop'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zfs_prop_values' mangled-name='zfs_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_values'>
- <parameter type-id='type-id-229' name='prop'/>
+ <function-decl name='zpool_prop_column_name' mangled-name='zpool_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_column_name'>
+ <parameter type-id='type-id-160' name='prop'/>
<return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_prop_is_string' mangled-name='zfs_prop_is_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_is_string'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='zfs_prop_column_name' mangled-name='zfs_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_column_name'>
- <parameter type-id='type-id-229' name='prop'/>
+ <function-decl name='zpool_prop_values' mangled-name='zpool_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_values'>
+ <parameter type-id='type-id-160' name='prop'/>
<return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_prop_align_right' mangled-name='zfs_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_align_right'>
- <parameter type-id='type-id-229' name='prop'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zpool_prop_random_value' mangled-name='zpool_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_random_value'>
+ <parameter type-id='type-id-160' name='prop'/>
+ <parameter type-id='type-id-7' name='seed'/>
+ <return type-id='type-id-7'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zpool_prop.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zpool_prop_feature' mangled-name='zpool_prop_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_feature'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zpool_prop_index_to_string' mangled-name='zpool_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_index_to_string'>
+ <parameter type-id='type-id-160' name='prop'/>
+ <parameter type-id='type-id-7' name='index'/>
+ <parameter type-id='type-id-294' name='string'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zpool_prop_string_to_index' mangled-name='zpool_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_string_to_index'>
+ <parameter type-id='type-id-160' name='prop'/>
+ <parameter type-id='type-id-84' name='string'/>
+ <parameter type-id='type-id-108' name='index'/>
+ <return type-id='type-id-2'/>
</function-decl>
<function-decl name='zpool_prop_unsupported' mangled-name='zpool_prop_unsupported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_unsupported'>
<parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-16'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zpool_prop_get_table' mangled-name='zpool_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_table'>
- <return type-id='type-id-530'/>
+ <function-decl name='zpool_prop_feature' mangled-name='zpool_prop_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_feature'>
+ <parameter type-id='type-id-84' name='name'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zpool_name_to_prop' mangled-name='zpool_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_name_to_prop'>
- <parameter type-id='type-id-84' name='propname'/>
- <return type-id='type-id-320'/>
+ <function-decl name='zpool_prop_default_numeric' mangled-name='zpool_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_numeric'>
+ <parameter type-id='type-id-160' name='prop'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='zpool_prop_to_name' mangled-name='zpool_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_to_name'>
- <parameter type-id='type-id-320' name='prop'/>
+ <function-decl name='zpool_prop_default_string' mangled-name='zpool_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_string'>
+ <parameter type-id='type-id-160' name='prop'/>
<return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zpool_prop_get_type' mangled-name='zpool_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_type'>
- <parameter type-id='type-id-320' name='prop'/>
- <return type-id='type-id-409'/>
+ <function-decl name='zpool_prop_setonce' mangled-name='zpool_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_setonce'>
+ <parameter type-id='type-id-160' name='prop'/>
+ <return type-id='type-id-9'/>
</function-decl>
<function-decl name='zpool_prop_readonly' mangled-name='zpool_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_readonly'>
- <parameter type-id='type-id-320' name='prop'/>
- <return type-id='type-id-16'/>
+ <parameter type-id='type-id-160' name='prop'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zpool_prop_setonce' mangled-name='zpool_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_setonce'>
- <parameter type-id='type-id-320' name='prop'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zpool_prop_get_type' mangled-name='zpool_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_type'>
+ <parameter type-id='type-id-160' name='prop'/>
+ <return type-id='type-id-293'/>
</function-decl>
- <function-decl name='zpool_prop_default_string' mangled-name='zpool_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_string'>
- <parameter type-id='type-id-320' name='prop'/>
+ <function-decl name='zpool_prop_to_name' mangled-name='zpool_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_to_name'>
+ <parameter type-id='type-id-160' name='prop'/>
<return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zpool_prop_default_numeric' mangled-name='zpool_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_numeric'>
- <parameter type-id='type-id-320' name='prop'/>
- <return type-id='type-id-22'/>
+ <function-decl name='zpool_name_to_prop' mangled-name='zpool_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_name_to_prop'>
+ <parameter type-id='type-id-84' name='propname'/>
+ <return type-id='type-id-160'/>
</function-decl>
- <function-decl name='zpool_prop_string_to_index' mangled-name='zpool_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_string_to_index'>
- <parameter type-id='type-id-320' name='prop'/>
- <parameter type-id='type-id-84' name='string'/>
- <parameter type-id='type-id-248' name='index'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zpool_prop_get_table' mangled-name='zpool_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_table'>
+ <return type-id='type-id-303'/>
</function-decl>
- <function-decl name='zpool_prop_index_to_string' mangled-name='zpool_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_index_to_string'>
- <parameter type-id='type-id-320' name='prop'/>
- <parameter type-id='type-id-22' name='index'/>
- <parameter type-id='type-id-241' name='string'/>
- <return type-id='type-id-8'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zprop_common.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
+ <function-decl name='zprop_width' mangled-name='zprop_width' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_width'>
+ <parameter type-id='type-id-2' name='prop'/>
+ <parameter type-id='type-id-85' name='fixed'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <return type-id='type-id-18'/>
</function-decl>
- <function-decl name='zpool_prop_random_value' mangled-name='zpool_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_random_value'>
- <parameter type-id='type-id-320' name='prop'/>
- <parameter type-id='type-id-22' name='seed'/>
- <return type-id='type-id-22'/>
+ <function-decl name='zprop_valid_for_type' mangled-name='zprop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_valid_for_type'>
+ <parameter type-id='type-id-2' name='prop'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <parameter type-id='type-id-9' name='headcheck'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zpool_prop_values' mangled-name='zpool_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_values'>
- <parameter type-id='type-id-320' name='prop'/>
+ <function-decl name='zprop_values' mangled-name='zprop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_values'>
+ <parameter type-id='type-id-2' name='prop'/>
+ <parameter type-id='type-id-66' name='type'/>
<return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zpool_prop_column_name' mangled-name='zpool_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_column_name'>
- <parameter type-id='type-id-320' name='prop'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zprop_random_value' mangled-name='zprop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_random_value'>
+ <parameter type-id='type-id-2' name='prop'/>
+ <parameter type-id='type-id-7' name='seed'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='zpool_prop_align_right' mangled-name='zpool_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_align_right'>
- <parameter type-id='type-id-320' name='prop'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zprop_index_to_string' mangled-name='zprop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_index_to_string'>
+ <parameter type-id='type-id-2' name='prop'/>
+ <parameter type-id='type-id-7' name='index'/>
+ <parameter type-id='type-id-294' name='string'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <return type-id='type-id-2'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zprop_common.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zprop_register_impl' mangled-name='zprop_register_impl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_impl'>
- <parameter type-id='type-id-8' name='prop'/>
+ <function-decl name='zprop_string_to_index' mangled-name='zprop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_string_to_index'>
+ <parameter type-id='type-id-2' name='prop'/>
+ <parameter type-id='type-id-84' name='string'/>
+ <parameter type-id='type-id-108' name='index'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zprop_name_to_prop' mangled-name='zprop_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_name_to_prop'>
+ <parameter type-id='type-id-84' name='propname'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zprop_iter_common' mangled-name='zprop_iter_common' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter_common'>
+ <parameter type-id='type-id-180' name='func'/>
+ <parameter type-id='type-id-13' name='cb'/>
+ <parameter type-id='type-id-9' name='show_all'/>
+ <parameter type-id='type-id-9' name='ordered'/>
+ <parameter type-id='type-id-66' name='type'/>
+ <return type-id='type-id-2'/>
+ </function-decl>
+ <function-decl name='zprop_register_hidden' mangled-name='zprop_register_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_hidden'>
+ <parameter type-id='type-id-2' name='prop'/>
<parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-409' name='type'/>
- <parameter type-id='type-id-22' name='numdefault'/>
- <parameter type-id='type-id-84' name='strdefault'/>
- <parameter type-id='type-id-410' name='attr'/>
- <parameter type-id='type-id-8' name='objset_types'/>
- <parameter type-id='type-id-84' name='values'/>
+ <parameter type-id='type-id-293' name='type'/>
+ <parameter type-id='type-id-297' name='attr'/>
+ <parameter type-id='type-id-2' name='objset_types'/>
<parameter type-id='type-id-84' name='colname'/>
- <parameter type-id='type-id-16' name='rightalign'/>
- <parameter type-id='type-id-16' name='visible'/>
- <parameter type-id='type-id-411' name='idx_tbl'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='zfs_mod_supported' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-50'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zprop_register_string' mangled-name='zprop_register_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_string'>
- <parameter type-id='type-id-8' name='prop'/>
+ <function-decl name='zprop_register_index' mangled-name='zprop_register_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_index'>
+ <parameter type-id='type-id-2' name='prop'/>
<parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-84' name='def'/>
- <parameter type-id='type-id-410' name='attr'/>
- <parameter type-id='type-id-8' name='objset_types'/>
+ <parameter type-id='type-id-7' name='def'/>
+ <parameter type-id='type-id-297' name='attr'/>
+ <parameter type-id='type-id-2' name='objset_types'/>
<parameter type-id='type-id-84' name='values'/>
<parameter type-id='type-id-84' name='colname'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='type-id-298' name='idx_tbl'/>
+ <return type-id='type-id-1'/>
</function-decl>
<function-decl name='zprop_register_number' mangled-name='zprop_register_number' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_number'>
- <parameter type-id='type-id-8' name='prop'/>
+ <parameter type-id='type-id-2' name='prop'/>
<parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-22' name='def'/>
- <parameter type-id='type-id-410' name='attr'/>
- <parameter type-id='type-id-8' name='objset_types'/>
+ <parameter type-id='type-id-7' name='def'/>
+ <parameter type-id='type-id-297' name='attr'/>
+ <parameter type-id='type-id-2' name='objset_types'/>
<parameter type-id='type-id-84' name='values'/>
<parameter type-id='type-id-84' name='colname'/>
- <return type-id='type-id-6'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zprop_register_index' mangled-name='zprop_register_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_index'>
- <parameter type-id='type-id-8' name='prop'/>
+ <function-decl name='zprop_register_string' mangled-name='zprop_register_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_string'>
+ <parameter type-id='type-id-2' name='prop'/>
<parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-22' name='def'/>
- <parameter type-id='type-id-410' name='attr'/>
- <parameter type-id='type-id-8' name='objset_types'/>
+ <parameter type-id='type-id-84' name='def'/>
+ <parameter type-id='type-id-297' name='attr'/>
+ <parameter type-id='type-id-2' name='objset_types'/>
<parameter type-id='type-id-84' name='values'/>
<parameter type-id='type-id-84' name='colname'/>
- <parameter type-id='type-id-411' name='idx_tbl'/>
- <return type-id='type-id-6'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zprop_register_hidden' mangled-name='zprop_register_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_hidden'>
- <parameter type-id='type-id-8' name='prop'/>
+ <function-decl name='zprop_register_impl' mangled-name='zprop_register_impl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_impl'>
+ <parameter type-id='type-id-2' name='prop'/>
<parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-409' name='type'/>
- <parameter type-id='type-id-410' name='attr'/>
- <parameter type-id='type-id-8' name='objset_types'/>
+ <parameter type-id='type-id-293' name='type'/>
+ <parameter type-id='type-id-7' name='numdefault'/>
+ <parameter type-id='type-id-84' name='strdefault'/>
+ <parameter type-id='type-id-297' name='attr'/>
+ <parameter type-id='type-id-2' name='objset_types'/>
+ <parameter type-id='type-id-84' name='values'/>
<parameter type-id='type-id-84' name='colname'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='zprop_iter_common' mangled-name='zprop_iter_common' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter_common'>
- <parameter type-id='type-id-439' name='func'/>
- <parameter type-id='type-id-7' name='cb'/>
- <parameter type-id='type-id-16' name='show_all'/>
- <parameter type-id='type-id-16' name='ordered'/>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='type-id-9' name='rightalign'/>
+ <parameter type-id='type-id-9' name='visible'/>
+ <parameter type-id='type-id-298' name='idx_tbl'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zprop_name_to_prop' mangled-name='zprop_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_name_to_prop'>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-8'/>
+ <function-decl name='__ctype_tolower_loc' mangled-name='__ctype_tolower_loc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zprop_string_to_index' mangled-name='zprop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_string_to_index'>
- <parameter type-id='type-id-8' name='prop'/>
- <parameter type-id='type-id-84' name='string'/>
- <parameter type-id='type-id-248' name='index'/>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-8'/>
+ <function-decl name='zfs_mod_supported' mangled-name='zfs_mod_supported' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zprop_index_to_string' mangled-name='zprop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_index_to_string'>
- <parameter type-id='type-id-8' name='prop'/>
- <parameter type-id='type-id-22' name='index'/>
- <parameter type-id='type-id-241' name='string'/>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-8'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libshare.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libshare' language='LANG_C99'>
+ <function-decl name='sa_validate_shareopts' mangled-name='sa_validate_shareopts' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_validate_shareopts'>
+ <parameter type-id='type-id-14' name='options'/>
+ <parameter type-id='type-id-14' name='proto'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='zprop_random_value' mangled-name='zprop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_random_value'>
- <parameter type-id='type-id-8' name='prop'/>
- <parameter type-id='type-id-22' name='seed'/>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-22'/>
+ <function-decl name='sa_errorstr' mangled-name='sa_errorstr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_errorstr'>
+ <parameter type-id='type-id-2' name='err'/>
+ <return type-id='type-id-14'/>
</function-decl>
- <function-decl name='zprop_values' mangled-name='zprop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_values'>
- <parameter type-id='type-id-8' name='prop'/>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-84'/>
+ <function-decl name='sa_commit_shares' mangled-name='sa_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_commit_shares'>
+ <parameter type-id='type-id-84' name='protocol'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='zprop_valid_for_type' mangled-name='zprop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_valid_for_type'>
- <parameter type-id='type-id-8' name='prop'/>
- <parameter type-id='type-id-13' name='type'/>
- <parameter type-id='type-id-16' name='headcheck'/>
- <return type-id='type-id-16'/>
+ <function-decl name='sa_is_shared' mangled-name='sa_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_is_shared'>
+ <parameter type-id='type-id-84' name='mountpoint'/>
+ <parameter type-id='type-id-14' name='protocol'/>
+ <return type-id='type-id-9'/>
</function-decl>
- <function-decl name='zprop_width' mangled-name='zprop_width' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_width'>
- <parameter type-id='type-id-8' name='prop'/>
- <parameter type-id='type-id-106' name='fixed'/>
- <parameter type-id='type-id-13' name='type'/>
- <return type-id='type-id-28'/>
+ <function-decl name='sa_disable_share' mangled-name='sa_disable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_disable_share'>
+ <parameter type-id='type-id-84' name='mountpoint'/>
+ <parameter type-id='type-id-14' name='protocol'/>
+ <return type-id='type-id-2'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libshare.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libshare' language='LANG_C99'>
- <function-decl name='libshare_nfs_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='sa_enable_share' mangled-name='sa_enable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_enable_share'>
+ <parameter type-id='type-id-84' name='zfsname'/>
+ <parameter type-id='type-id-84' name='mountpoint'/>
+ <parameter type-id='type-id-84' name='shareopts'/>
+ <parameter type-id='type-id-14' name='protocol'/>
+ <return type-id='type-id-2'/>
</function-decl>
- <function-decl name='libshare_smb_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='libshare_nfs_init' mangled-name='libshare_nfs_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='sa_is_shared' mangled-name='sa_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_is_shared'>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <parameter type-id='type-id-17' name='protocol'/>
- <return type-id='type-id-16'/>
+ <function-decl name='libshare_smb_init' mangled-name='libshare_smb_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='nfs.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libshare' language='LANG_C99'>
- <function-decl name='mkdir' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-8'/>
- </function-decl>
+ <abi-instr version='1.0' address-size='64' path='nfs.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libshare' language='LANG_C99'>
<function-decl name='mkostemp' mangled-name='mkostemp64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='flock' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-8'/>
- <return type-id='type-id-8'/>
+ <function-decl name='mkdir' mangled-name='mkdir' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nfs_copy_entries' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='flock' mangled-name='flock' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='rename' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
+ <function-decl name='rename' mangled-name='rename' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='unlink' mangled-name='unlink' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nfs_copy_entries' mangled-name='nfs_copy_entries' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/nfs.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libshare' language='LANG_C99'>
- <function-decl name='fputs' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-150'/>
- <return type-id='type-id-8'/>
+ <abi-instr version='1.0' address-size='64' path='os/linux/nfs.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libshare' language='LANG_C99'>
+ <function-decl name='register_fstype' mangled-name='register_fstype' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='sa_fstype' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-533'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='next' type-id='type-id-534' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='name' type-id='type-id-84' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='ops' type-id='type-id-535' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='fsinfo_index' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-533' size-in-bits='64' id='type-id-534'/>
- <class-decl name='sa_share_ops' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-536'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='enable_share' type-id='type-id-537' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='disable_share' type-id='type-id-537' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='is_shared' type-id='type-id-538' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='validate_shareopts' type-id='type-id-539' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='update_shareopts' type-id='type-id-540' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='clear_shareopts' type-id='type-id-541' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='commit_shares' type-id='type-id-542' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='sa_share_impl' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-543'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='sa_mountpoint' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='sa_zfsname' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='sa_fsinfo' type-id='type-id-544' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='sa_share_fsinfo' size-in-bits='64' is-struct='yes' visibility='default' id='type-id-545'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='shareopts' type-id='type-id-17' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='sa_share_fsinfo_t' type-id='type-id-545' id='type-id-546'/>
- <pointer-type-def type-id='type-id-546' size-in-bits='64' id='type-id-544'/>
- <pointer-type-def type-id='type-id-543' size-in-bits='64' id='type-id-547'/>
- <typedef-decl name='sa_share_impl_t' type-id='type-id-547' id='type-id-548'/>
- <pointer-type-def type-id='type-id-549' size-in-bits='64' id='type-id-537'/>
- <pointer-type-def type-id='type-id-550' size-in-bits='64' id='type-id-538'/>
- <pointer-type-def type-id='type-id-551' size-in-bits='64' id='type-id-539'/>
- <pointer-type-def type-id='type-id-552' size-in-bits='64' id='type-id-540'/>
- <pointer-type-def type-id='type-id-553' size-in-bits='64' id='type-id-541'/>
- <pointer-type-def type-id='type-id-554' size-in-bits='64' id='type-id-542'/>
- <typedef-decl name='sa_share_ops_t' type-id='type-id-536' id='type-id-555'/>
- <qualified-type-def type-id='type-id-555' const='yes' id='type-id-556'/>
- <pointer-type-def type-id='type-id-556' size-in-bits='64' id='type-id-535'/>
- <qualified-type-def type-id='type-id-536' const='yes' id='type-id-557'/>
- <pointer-type-def type-id='type-id-557' size-in-bits='64' id='type-id-558'/>
- <function-decl name='register_fstype' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-558'/>
- <return type-id='type-id-534'/>
+ <function-decl name='nfs_toggle_share' mangled-name='nfs_toggle_share' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-559' size-in-bits='64' id='type-id-560'/>
- <function-decl name='nfs_toggle_share' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-547'/>
- <parameter type-id='type-id-560'/>
- <return type-id='type-id-8'/>
+ <function-decl name='fputs' mangled-name='fputs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-554'>
- <return type-id='type-id-8'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-551'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-559'>
- <parameter type-id='type-id-547'/>
- <parameter type-id='type-id-17'/>
- <return type-id='type-id-8'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-549'>
- <parameter type-id='type-id-548'/>
- <return type-id='type-id-8'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-552'>
- <parameter type-id='type-id-548'/>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-8'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-550'>
- <parameter type-id='type-id-548'/>
- <return type-id='type-id-16'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-553'>
- <parameter type-id='type-id-548'/>
- <return type-id='type-id-6'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/smb.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libshare' language='LANG_C99'>
- <function-decl name='opendir' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-84'/>
- <return type-id='type-id-305'/>
+ <function-decl name='__builtin_stpcpy' mangled-name='stpcpy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
- <class-decl name='dirent' size-in-bits='2240' is-struct='yes' visibility='default' id='type-id-561'>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/smb.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libshare' language='LANG_C99'>
+ <class-decl name='smb_share_s' size-in-bits='36992' is-struct='yes' visibility='default' id='type-id-304'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='d_ino' type-id='type-id-307' visibility='default'/>
+ <var-decl name='name' type-id='type-id-305' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='d_off' type-id='type-id-155' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2040'>
+ <var-decl name='path' type-id='type-id-193' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='d_reclen' type-id='type-id-152' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='34808'>
+ <var-decl name='comment' type-id='type-id-305' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='144'>
- <var-decl name='d_type' type-id='type-id-75' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='36864'>
+ <var-decl name='guest_ok' type-id='type-id-9' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='152'>
- <var-decl name='d_name' type-id='type-id-12' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='36928'>
+ <var-decl name='next' type-id='type-id-306' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-561' size-in-bits='64' id='type-id-562'/>
- <function-decl name='readdir' mangled-name='readdir64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-305'/>
- <return type-id='type-id-562'/>
- </function-decl>
- <function-decl name='fgets' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-17'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-150'/>
- <return type-id='type-id-17'/>
+
+ <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='2040' id='type-id-305'>
+ <subrange length='255' type-id='type-id-24' id='type-id-307'/>
+
+ </array-type-def>
+ <pointer-type-def type-id='type-id-304' size-in-bits='64' id='type-id-306'/>
+ <typedef-decl name='smb_share_t' type-id='type-id-304' id='type-id-308'/>
+ <pointer-type-def type-id='type-id-308' size-in-bits='64' id='type-id-309'/>
+ <var-decl name='smb_shares' type-id='type-id-309' visibility='default'/>
+ <function-decl name='__fgets_alias' mangled-name='fgets' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='opendir' mangled-name='opendir' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-1'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_crypto.c b/sys/contrib/openzfs/lib/libzfs/libzfs_crypto.c
index c3cded24f6ba..644dd26859f1 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_crypto.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_crypto.c
@@ -1,1805 +1,1805 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Datto, Inc. All rights reserved.
* Copyright 2020 Joyent, Inc.
*/
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/dsl_crypt.h>
#include <libintl.h>
#include <termios.h>
#include <signal.h>
#include <errno.h>
#include <openssl/evp.h>
#if LIBFETCH_DYNAMIC
#include <dlfcn.h>
#endif
#if LIBFETCH_IS_FETCH
#include <sys/param.h>
#include <stdio.h>
#include <fetch.h>
#elif LIBFETCH_IS_LIBCURL
#include <curl/curl.h>
#endif
#include <libzfs.h>
#include "libzfs_impl.h"
#include "zfeature_common.h"
/*
* User keys are used to decrypt the master encryption keys of a dataset. This
* indirection allows a user to change his / her access key without having to
* re-encrypt the entire dataset. User keys can be provided in one of several
* ways. Raw keys are simply given to the kernel as is. Similarly, hex keys
* are converted to binary and passed into the kernel. Password based keys are
* a bit more complicated. Passwords alone do not provide suitable entropy for
* encryption and may be too short or too long to be used. In order to derive
* a more appropriate key we use a PBKDF2 function. This function is designed
* to take a (relatively) long time to calculate in order to discourage
* attackers from guessing from a list of common passwords. PBKDF2 requires
* 2 additional parameters. The first is the number of iterations to run, which
* will ultimately determine how long it takes to derive the resulting key from
* the password. The second parameter is a salt that is randomly generated for
* each dataset. The salt is used to "tweak" PBKDF2 such that a group of
* attackers cannot reasonably generate a table mapping commonly known passwords
* to their output keys and expect it to work for all past and future PBKDF2 users.
* We store the salt as a hidden property of the dataset (although it is
* technically ok if the salt is known to the attacker).
*/
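/*
 * Illustrative sketch only (not part of this file or of the change above):
 * how a passphrase is turned into a wrapping key with the same PBKDF2
 * primitive that derive_key() uses further down.  It relies on the headers
 * already included above; in real use the iteration count and salt come
 * from the pbkdf2iters and pbkdf2salt dataset properties.
 */
static int
example_derive_wrapping_key(const char *passphrase, uint64_t salt,
    uint64_t iters, uint8_t *key /* WRAPPING_KEY_LEN bytes */)
{
        uint64_t le_salt = LE_64(salt);

        /* OpenSSL returns 1 on success, 0 on failure */
        if (PKCS5_PBKDF2_HMAC_SHA1(passphrase, strlen(passphrase),
            (uint8_t *)&le_salt, sizeof (le_salt), iters,
            WRAPPING_KEY_LEN, key) != 1)
                return (EIO);
        return (0);
}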
#define MIN_PASSPHRASE_LEN 8
#define MAX_PASSPHRASE_LEN 512
#define MAX_KEY_PROMPT_ATTEMPTS 3
static int caught_interrupt;
static int get_key_material_file(libzfs_handle_t *, const char *, const char *,
zfs_keyformat_t, boolean_t, uint8_t **, size_t *);
static int get_key_material_https(libzfs_handle_t *, const char *, const char *,
zfs_keyformat_t, boolean_t, uint8_t **, size_t *);
static zfs_uri_handler_t uri_handlers[] = {
{ "file", get_key_material_file },
{ "https", get_key_material_https },
{ "http", get_key_material_https },
{ NULL, NULL }
};
static int
pkcs11_get_urandom(uint8_t *buf, size_t bytes)
{
int rand;
ssize_t bytes_read = 0;
rand = open("/dev/urandom", O_RDONLY | O_CLOEXEC);
if (rand < 0)
return (rand);
while (bytes_read < bytes) {
ssize_t rc = read(rand, buf + bytes_read, bytes - bytes_read);
if (rc < 0)
break;
bytes_read += rc;
}
(void) close(rand);
return (bytes_read);
}
static int
zfs_prop_parse_keylocation(libzfs_handle_t *restrict hdl, const char *str,
zfs_keylocation_t *restrict locp, char **restrict schemep)
{
*locp = ZFS_KEYLOCATION_NONE;
*schemep = NULL;
if (strcmp("prompt", str) == 0) {
*locp = ZFS_KEYLOCATION_PROMPT;
return (0);
}
regmatch_t pmatch[2];
if (regexec(&hdl->libzfs_urire, str, ARRAY_SIZE(pmatch),
pmatch, 0) == 0) {
size_t scheme_len;
if (pmatch[1].rm_so == -1) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Invalid URI"));
return (EINVAL);
}
scheme_len = pmatch[1].rm_eo - pmatch[1].rm_so;
*schemep = calloc(1, scheme_len + 1);
if (*schemep == NULL) {
int ret = errno;
errno = 0;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Invalid URI"));
return (ret);
}
(void) memcpy(*schemep, str + pmatch[1].rm_so, scheme_len);
*locp = ZFS_KEYLOCATION_URI;
return (0);
}
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Invalid keylocation"));
return (EINVAL);
}
static int
hex_key_to_raw(char *hex, int hexlen, uint8_t *out)
{
int ret, i;
unsigned int c;
for (i = 0; i < hexlen; i += 2) {
if (!isxdigit(hex[i]) || !isxdigit(hex[i + 1])) {
ret = EINVAL;
goto error;
}
ret = sscanf(&hex[i], "%02x", &c);
if (ret != 1) {
ret = EINVAL;
goto error;
}
out[i / 2] = c;
}
return (0);
error:
return (ret);
}
static void
catch_signal(int sig)
{
caught_interrupt = sig;
}
static const char *
get_format_prompt_string(zfs_keyformat_t format)
{
switch (format) {
case ZFS_KEYFORMAT_RAW:
return ("raw key");
case ZFS_KEYFORMAT_HEX:
return ("hex key");
case ZFS_KEYFORMAT_PASSPHRASE:
return ("passphrase");
default:
/* shouldn't happen */
return (NULL);
}
}
/* do basic validation of the key material */
static int
validate_key(libzfs_handle_t *hdl, zfs_keyformat_t keyformat,
const char *key, size_t keylen)
{
switch (keyformat) {
case ZFS_KEYFORMAT_RAW:
/* verify the key length is correct */
if (keylen < WRAPPING_KEY_LEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Raw key too short (expected %u)."),
WRAPPING_KEY_LEN);
return (EINVAL);
}
if (keylen > WRAPPING_KEY_LEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Raw key too long (expected %u)."),
WRAPPING_KEY_LEN);
return (EINVAL);
}
break;
case ZFS_KEYFORMAT_HEX:
/* verify the key length is correct */
if (keylen < WRAPPING_KEY_LEN * 2) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Hex key too short (expected %u)."),
WRAPPING_KEY_LEN * 2);
return (EINVAL);
}
if (keylen > WRAPPING_KEY_LEN * 2) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Hex key too long (expected %u)."),
WRAPPING_KEY_LEN * 2);
return (EINVAL);
}
/* check for invalid hex digits */
for (size_t i = 0; i < WRAPPING_KEY_LEN * 2; i++) {
if (!isxdigit(key[i])) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Invalid hex character detected."));
return (EINVAL);
}
}
break;
case ZFS_KEYFORMAT_PASSPHRASE:
/* verify the length is within bounds */
if (keylen > MAX_PASSPHRASE_LEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Passphrase too long (max %u)."),
MAX_PASSPHRASE_LEN);
return (EINVAL);
}
if (keylen < MIN_PASSPHRASE_LEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Passphrase too short (min %u)."),
MIN_PASSPHRASE_LEN);
return (EINVAL);
}
break;
default:
/* can't happen, checked above */
break;
}
return (0);
}
static int
libzfs_getpassphrase(zfs_keyformat_t keyformat, boolean_t is_reenter,
boolean_t new_key, const char *fsname,
char **restrict res, size_t *restrict reslen)
{
FILE *f = stdin;
size_t buflen = 0;
ssize_t bytes;
int ret = 0;
struct termios old_term, new_term;
struct sigaction act, osigint, osigtstp;
*res = NULL;
*reslen = 0;
/*
* handle SIGINT and ignore SIGTSTP. This is necessary to
* restore the state of the terminal.
*/
caught_interrupt = 0;
act.sa_flags = 0;
(void) sigemptyset(&act.sa_mask);
act.sa_handler = catch_signal;
(void) sigaction(SIGINT, &act, &osigint);
act.sa_handler = SIG_IGN;
(void) sigaction(SIGTSTP, &act, &osigtstp);
(void) printf("%s %s%s",
is_reenter ? "Re-enter" : "Enter",
new_key ? "new " : "",
get_format_prompt_string(keyformat));
if (fsname != NULL)
(void) printf(" for '%s'", fsname);
(void) fputc(':', stdout);
(void) fflush(stdout);
/* disable the terminal echo for key input */
(void) tcgetattr(fileno(f), &old_term);
new_term = old_term;
new_term.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHONL);
ret = tcsetattr(fileno(f), TCSAFLUSH, &new_term);
if (ret != 0) {
ret = errno;
errno = 0;
goto out;
}
bytes = getline(res, &buflen, f);
if (bytes < 0) {
ret = errno;
errno = 0;
goto out;
}
/* trim the ending newline if it exists */
if (bytes > 0 && (*res)[bytes - 1] == '\n') {
(*res)[bytes - 1] = '\0';
bytes--;
}
*reslen = bytes;
out:
/* reset the terminal */
(void) tcsetattr(fileno(f), TCSAFLUSH, &old_term);
(void) sigaction(SIGINT, &osigint, NULL);
(void) sigaction(SIGTSTP, &osigtstp, NULL);
/* if we caught a signal, re-throw it now */
if (caught_interrupt != 0)
(void) kill(getpid(), caught_interrupt);
/* print the newline that was not echoed */
(void) printf("\n");
return (ret);
}
static int
get_key_interactive(libzfs_handle_t *restrict hdl, const char *fsname,
zfs_keyformat_t keyformat, boolean_t confirm_key, boolean_t newkey,
uint8_t **restrict outbuf, size_t *restrict len_out)
{
char *buf = NULL, *buf2 = NULL;
size_t buflen = 0, buf2len = 0;
int ret = 0;
ASSERT(isatty(fileno(stdin)));
/* raw keys cannot be entered on the terminal */
if (keyformat == ZFS_KEYFORMAT_RAW) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Cannot enter raw keys on the terminal"));
goto out;
}
/* prompt for the key */
if ((ret = libzfs_getpassphrase(keyformat, B_FALSE, newkey, fsname,
&buf, &buflen)) != 0) {
free(buf);
buf = NULL;
buflen = 0;
goto out;
}
if (!confirm_key)
goto out;
if ((ret = validate_key(hdl, keyformat, buf, buflen)) != 0) {
free(buf);
return (ret);
}
ret = libzfs_getpassphrase(keyformat, B_TRUE, newkey, fsname, &buf2,
&buf2len);
if (ret != 0) {
free(buf);
free(buf2);
buf = buf2 = NULL;
buflen = buf2len = 0;
goto out;
}
if (buflen != buf2len || strcmp(buf, buf2) != 0) {
free(buf);
buf = NULL;
buflen = 0;
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Provided keys do not match."));
}
free(buf2);
out:
*outbuf = (uint8_t *)buf;
*len_out = buflen;
return (ret);
}
static int
get_key_material_raw(FILE *fd, zfs_keyformat_t keyformat,
uint8_t **buf, size_t *len_out)
{
int ret = 0;
size_t buflen = 0;
*len_out = 0;
/* read the key material */
if (keyformat != ZFS_KEYFORMAT_RAW) {
ssize_t bytes;
bytes = getline((char **)buf, &buflen, fd);
if (bytes < 0) {
ret = errno;
errno = 0;
goto out;
}
/* trim the ending newline if it exists */
if (bytes > 0 && (*buf)[bytes - 1] == '\n') {
(*buf)[bytes - 1] = '\0';
bytes--;
}
*len_out = bytes;
} else {
size_t n;
/*
* Raw keys may have newline characters in them and so can't
* use getline(). Here we attempt to read 33 bytes so that we
* can properly check the key length (the file should only have
* 32 bytes).
*/
*buf = malloc((WRAPPING_KEY_LEN + 1) * sizeof (uint8_t));
if (*buf == NULL) {
ret = ENOMEM;
goto out;
}
n = fread(*buf, 1, WRAPPING_KEY_LEN + 1, fd);
if (n == 0 || ferror(fd)) {
/* size errors are handled by the calling function */
free(*buf);
*buf = NULL;
ret = errno;
errno = 0;
goto out;
}
*len_out = n;
}
out:
return (ret);
}
static int
get_key_material_file(libzfs_handle_t *hdl, const char *uri,
const char *fsname, zfs_keyformat_t keyformat, boolean_t newkey,
uint8_t **restrict buf, size_t *restrict len_out)
{
FILE *f = NULL;
int ret = 0;
if (strlen(uri) < 7)
return (EINVAL);
if ((f = fopen(uri + 7, "re")) == NULL) {
ret = errno;
errno = 0;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "Failed to open key material file"));
+ "Failed to open key material file: %s"), strerror(ret));
return (ret);
}
ret = get_key_material_raw(f, keyformat, buf, len_out);
(void) fclose(f);
return (ret);
}
static int
get_key_material_https(libzfs_handle_t *hdl, const char *uri,
const char *fsname, zfs_keyformat_t keyformat, boolean_t newkey,
uint8_t **restrict buf, size_t *restrict len_out)
{
int ret = 0;
FILE *key = NULL;
boolean_t is_http = strncmp(uri, "http:", strlen("http:")) == 0;
if (strlen(uri) < (is_http ? 7 : 8)) {
ret = EINVAL;
goto end;
}
#if LIBFETCH_DYNAMIC
#define LOAD_FUNCTION(func) \
__typeof__(func) *func = dlsym(hdl->libfetch, #func);
if (hdl->libfetch == NULL)
hdl->libfetch = dlopen(LIBFETCH_SONAME, RTLD_LAZY);
if (hdl->libfetch == NULL) {
hdl->libfetch = (void *)-1;
char *err = dlerror();
if (err)
hdl->libfetch_load_error = strdup(err);
}
if (hdl->libfetch == (void *)-1) {
ret = ENOSYS;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Couldn't load %s: %s"),
LIBFETCH_SONAME, hdl->libfetch_load_error ?: "(?)");
goto end;
}
boolean_t ok;
#if LIBFETCH_IS_FETCH
LOAD_FUNCTION(fetchGetURL);
char *fetchLastErrString = dlsym(hdl->libfetch, "fetchLastErrString");
ok = fetchGetURL && fetchLastErrString;
#elif LIBFETCH_IS_LIBCURL
LOAD_FUNCTION(curl_easy_init);
LOAD_FUNCTION(curl_easy_setopt);
LOAD_FUNCTION(curl_easy_perform);
LOAD_FUNCTION(curl_easy_cleanup);
LOAD_FUNCTION(curl_easy_strerror);
LOAD_FUNCTION(curl_easy_getinfo);
ok = curl_easy_init && curl_easy_setopt && curl_easy_perform &&
curl_easy_cleanup && curl_easy_strerror && curl_easy_getinfo;
#endif
if (!ok) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation=%s back-end %s missing symbols."),
is_http ? "http://" : "https://", LIBFETCH_SONAME);
ret = ENOSYS;
goto end;
}
#endif
#if LIBFETCH_IS_FETCH
key = fetchGetURL(uri, "");
if (key == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Couldn't GET %s: %s"),
uri, fetchLastErrString);
ret = ENETDOWN;
}
#elif LIBFETCH_IS_LIBCURL
CURL *curl = curl_easy_init();
if (curl == NULL) {
ret = ENOTSUP;
goto end;
}
int kfd = -1;
#ifdef O_TMPFILE
kfd = open(getenv("TMPDIR") ?: "/tmp",
O_RDWR | O_TMPFILE | O_EXCL | O_CLOEXEC, 0600);
if (kfd != -1)
goto kfdok;
#endif
char *path;
if (asprintf(&path,
"%s/libzfs-XXXXXXXX.https", getenv("TMPDIR") ?: "/tmp") == -1) {
ret = ENOMEM;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s"),
strerror(ret));
goto end;
}
kfd = mkostemps(path, strlen(".https"), O_CLOEXEC);
if (kfd == -1) {
ret = errno;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Couldn't create temporary file %s: %s"),
path, strerror(ret));
free(path);
goto end;
}
(void) unlink(path);
free(path);
kfdok:
if ((key = fdopen(kfd, "r+")) == NULL) {
ret = errno;
free(path);
(void) close(kfd);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Couldn't reopen temporary file: %s"), strerror(ret));
goto end;
}
char errbuf[CURL_ERROR_SIZE] = "";
char *cainfo = getenv("SSL_CA_CERT_FILE"); /* matches fetch(3) */
char *capath = getenv("SSL_CA_CERT_PATH"); /* matches fetch(3) */
char *clcert = getenv("SSL_CLIENT_CERT_FILE"); /* matches fetch(3) */
char *clkey = getenv("SSL_CLIENT_KEY_FILE"); /* matches fetch(3) */
(void) curl_easy_setopt(curl, CURLOPT_URL, uri);
(void) curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
(void) curl_easy_setopt(curl, CURLOPT_TIMEOUT_MS, 30000L);
(void) curl_easy_setopt(curl, CURLOPT_WRITEDATA, key);
(void) curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, errbuf);
if (cainfo != NULL)
(void) curl_easy_setopt(curl, CURLOPT_CAINFO, cainfo);
if (capath != NULL)
(void) curl_easy_setopt(curl, CURLOPT_CAPATH, capath);
if (clcert != NULL)
(void) curl_easy_setopt(curl, CURLOPT_SSLCERT, clcert);
if (clkey != NULL)
(void) curl_easy_setopt(curl, CURLOPT_SSLKEY, clkey);
CURLcode res = curl_easy_perform(curl);
if (res != CURLE_OK) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to connect to %s: %s"),
uri, strlen(errbuf) ? errbuf : curl_easy_strerror(res));
ret = ENETDOWN;
} else {
long resp = 200;
(void) curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &resp);
if (resp < 200 || resp >= 300) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Couldn't GET %s: %ld"),
uri, resp);
ret = ENOENT;
} else
rewind(key);
}
curl_easy_cleanup(curl);
#else
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"No keylocation=%s back-end."), is_http ? "http://" : "https://");
ret = ENOSYS;
#endif
end:
if (ret == 0)
ret = get_key_material_raw(key, keyformat, buf, len_out);
if (key != NULL)
fclose(key);
return (ret);
}
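/*
 * Illustrative sketch only (not part of this file or change): the
 * dlopen()/dlsym() pattern used above for the optional libfetch/libcurl
 * back-end, reduced to one lazily resolved symbol with a clean fallback
 * when the library is absent.  It uses <dlfcn.h> and <errno.h>, both
 * included above; "libexample.so.1" and example_fn are placeholders,
 * not real OpenZFS symbols.
 */
typedef int (*example_fn_t)(int);

static int
call_optional_backend(int arg)
{
        void *lib = dlopen("libexample.so.1", RTLD_LAZY);
        if (lib == NULL)
                return (ENOSYS);        /* back-end unavailable */

        example_fn_t fn = (example_fn_t)dlsym(lib, "example_fn");
        int ret = (fn != NULL) ? fn(arg) : ENOSYS;

        (void) dlclose(lib);
        return (ret);
}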
/*
* Attempts to fetch key material, no matter where it might live. The key
* material is allocated and returned in km_out. *can_retry_out will be set
* to B_TRUE if the user is providing the key material interactively, allowing
* for re-entry attempts.
*/
static int
get_key_material(libzfs_handle_t *hdl, boolean_t do_verify, boolean_t newkey,
zfs_keyformat_t keyformat, char *keylocation, const char *fsname,
uint8_t **km_out, size_t *kmlen_out, boolean_t *can_retry_out)
{
int ret;
zfs_keylocation_t keyloc = ZFS_KEYLOCATION_NONE;
uint8_t *km = NULL;
size_t kmlen = 0;
char *uri_scheme = NULL;
zfs_uri_handler_t *handler = NULL;
boolean_t can_retry = B_FALSE;
/* verify and parse the keylocation */
ret = zfs_prop_parse_keylocation(hdl, keylocation, &keyloc,
&uri_scheme);
if (ret != 0)
goto error;
/* open the appropriate file descriptor */
switch (keyloc) {
case ZFS_KEYLOCATION_PROMPT:
if (isatty(fileno(stdin))) {
can_retry = keyformat != ZFS_KEYFORMAT_RAW;
ret = get_key_interactive(hdl, fsname, keyformat,
do_verify, newkey, &km, &kmlen);
} else {
/* fetch the key material into the buffer */
ret = get_key_material_raw(stdin, keyformat, &km,
&kmlen);
}
if (ret != 0)
goto error;
break;
case ZFS_KEYLOCATION_URI:
ret = ENOTSUP;
for (handler = uri_handlers; handler->zuh_scheme != NULL;
handler++) {
if (strcmp(handler->zuh_scheme, uri_scheme) != 0)
continue;
if ((ret = handler->zuh_handler(hdl, keylocation,
fsname, keyformat, newkey, &km, &kmlen)) != 0)
goto error;
break;
}
if (ret == ENOTSUP) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"URI scheme is not supported"));
goto error;
}
break;
default:
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Invalid keylocation."));
goto error;
}
if ((ret = validate_key(hdl, keyformat, (const char *)km, kmlen)) != 0)
goto error;
*km_out = km;
*kmlen_out = kmlen;
if (can_retry_out != NULL)
*can_retry_out = can_retry;
free(uri_scheme);
return (0);
error:
free(km);
*km_out = NULL;
*kmlen_out = 0;
if (can_retry_out != NULL)
*can_retry_out = can_retry;
free(uri_scheme);
return (ret);
}
static int
derive_key(libzfs_handle_t *hdl, zfs_keyformat_t format, uint64_t iters,
uint8_t *key_material, size_t key_material_len, uint64_t salt,
uint8_t **key_out)
{
int ret;
uint8_t *key;
*key_out = NULL;
key = zfs_alloc(hdl, WRAPPING_KEY_LEN);
if (!key)
return (ENOMEM);
switch (format) {
case ZFS_KEYFORMAT_RAW:
bcopy(key_material, key, WRAPPING_KEY_LEN);
break;
case ZFS_KEYFORMAT_HEX:
ret = hex_key_to_raw((char *)key_material,
WRAPPING_KEY_LEN * 2, key);
if (ret != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Invalid hex key provided."));
goto error;
}
break;
case ZFS_KEYFORMAT_PASSPHRASE:
salt = LE_64(salt);
ret = PKCS5_PBKDF2_HMAC_SHA1((char *)key_material,
strlen((char *)key_material), ((uint8_t *)&salt),
sizeof (uint64_t), iters, WRAPPING_KEY_LEN, key);
if (ret != 1) {
ret = EIO;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to generate key from passphrase."));
goto error;
}
break;
default:
ret = EINVAL;
goto error;
}
*key_out = key;
return (0);
error:
free(key);
*key_out = NULL;
return (ret);
}
static boolean_t
encryption_feature_is_enabled(zpool_handle_t *zph)
{
nvlist_t *features;
uint64_t feat_refcount;
/* check that features can be enabled */
if (zpool_get_prop_int(zph, ZPOOL_PROP_VERSION, NULL)
< SPA_VERSION_FEATURES)
return (B_FALSE);
/* check for crypto feature */
features = zpool_get_features(zph);
if (!features || nvlist_lookup_uint64(features,
spa_feature_table[SPA_FEATURE_ENCRYPTION].fi_guid,
&feat_refcount) != 0)
return (B_FALSE);
return (B_TRUE);
}
static int
populate_create_encryption_params_nvlists(libzfs_handle_t *hdl,
zfs_handle_t *zhp, boolean_t newkey, zfs_keyformat_t keyformat,
char *keylocation, nvlist_t *props, uint8_t **wkeydata, uint_t *wkeylen)
{
int ret;
uint64_t iters = 0, salt = 0;
uint8_t *key_material = NULL;
size_t key_material_len = 0;
uint8_t *key_data = NULL;
const char *fsname = (zhp) ? zfs_get_name(zhp) : NULL;
/* get key material from keyformat and keylocation */
ret = get_key_material(hdl, B_TRUE, newkey, keyformat, keylocation,
fsname, &key_material, &key_material_len, NULL);
if (ret != 0)
goto error;
/* passphrase formats require a salt and pbkdf2 iters property */
if (keyformat == ZFS_KEYFORMAT_PASSPHRASE) {
/* always generate a new salt */
ret = pkcs11_get_urandom((uint8_t *)&salt, sizeof (uint64_t));
if (ret != sizeof (uint64_t)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to generate salt."));
goto error;
}
ret = nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), salt);
if (ret != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to add salt to properties."));
goto error;
}
/*
* If not otherwise specified, use the default number of
* pbkdf2 iterations. If specified, we have already checked
* that the given value is greater than MIN_PBKDF2_ITERATIONS
* during zfs_valid_proplist().
*/
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), &iters);
if (ret == ENOENT) {
iters = DEFAULT_PBKDF2_ITERATIONS;
ret = nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), iters);
if (ret != 0)
goto error;
} else if (ret != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to get pbkdf2 iterations."));
goto error;
}
} else {
/* check that pbkdf2iters was not specified by the user */
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), &iters);
if (ret == 0) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Cannot specify pbkdf2iters with a non-passphrase "
"keyformat."));
goto error;
}
}
/* derive a key from the key material */
ret = derive_key(hdl, keyformat, iters, key_material, key_material_len,
salt, &key_data);
if (ret != 0)
goto error;
free(key_material);
*wkeydata = key_data;
*wkeylen = WRAPPING_KEY_LEN;
return (0);
error:
if (key_material != NULL)
free(key_material);
if (key_data != NULL)
free(key_data);
*wkeydata = NULL;
*wkeylen = 0;
return (ret);
}
static boolean_t
proplist_has_encryption_props(nvlist_t *props)
{
int ret;
uint64_t intval;
char *strval;
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), &intval);
if (ret == 0 && intval != ZIO_CRYPT_OFF)
return (B_TRUE);
ret = nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &strval);
if (ret == 0 && strcmp(strval, "none") != 0)
return (B_TRUE);
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), &intval);
if (ret == 0)
return (B_TRUE);
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), &intval);
if (ret == 0)
return (B_TRUE);
return (B_FALSE);
}
int
zfs_crypto_get_encryption_root(zfs_handle_t *zhp, boolean_t *is_encroot,
char *buf)
{
int ret;
char prop_encroot[MAXNAMELEN];
/* if the dataset isn't encrypted, just return */
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) == ZIO_CRYPT_OFF) {
*is_encroot = B_FALSE;
if (buf != NULL)
buf[0] = '\0';
return (0);
}
ret = zfs_prop_get(zhp, ZFS_PROP_ENCRYPTION_ROOT, prop_encroot,
sizeof (prop_encroot), NULL, NULL, 0, B_TRUE);
if (ret != 0) {
*is_encroot = B_FALSE;
if (buf != NULL)
buf[0] = '\0';
return (ret);
}
*is_encroot = strcmp(prop_encroot, zfs_get_name(zhp)) == 0;
if (buf != NULL)
strcpy(buf, prop_encroot);
return (0);
}
int
zfs_crypto_create(libzfs_handle_t *hdl, char *parent_name, nvlist_t *props,
nvlist_t *pool_props, boolean_t stdin_available, uint8_t **wkeydata_out,
uint_t *wkeylen_out)
{
int ret;
char errbuf[1024];
uint64_t crypt = ZIO_CRYPT_INHERIT, pcrypt = ZIO_CRYPT_INHERIT;
uint64_t keyformat = ZFS_KEYFORMAT_NONE;
char *keylocation = NULL;
zfs_handle_t *pzhp = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
boolean_t local_crypt = B_TRUE;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Encryption create error"));
/* lookup crypt from props */
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), &crypt);
if (ret != 0)
local_crypt = B_FALSE;
/* lookup key location and format from props */
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), &keyformat);
(void) nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
if (parent_name != NULL) {
/* get a reference to parent dataset */
pzhp = make_dataset_handle(hdl, parent_name);
if (pzhp == NULL) {
ret = ENOENT;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to lookup parent."));
goto out;
}
/* Lookup parent's crypt */
pcrypt = zfs_prop_get_int(pzhp, ZFS_PROP_ENCRYPTION);
/* Params require the encryption feature */
if (!encryption_feature_is_enabled(pzhp->zpool_hdl)) {
if (proplist_has_encryption_props(props)) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Encryption feature not enabled."));
goto out;
}
ret = 0;
goto out;
}
} else {
/*
* special case for the root dataset, where the encryption feature
* won't be on disk yet
*/
if (!nvlist_exists(pool_props, "feature@encryption")) {
if (proplist_has_encryption_props(props)) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Encryption feature not enabled."));
goto out;
}
ret = 0;
goto out;
}
pcrypt = ZIO_CRYPT_OFF;
}
/* Get the inherited encryption property if we don't have it locally */
if (!local_crypt)
crypt = pcrypt;
/*
* At this point crypt should be the actual encryption value. If
* encryption is off just verify that no encryption properties have
* been specified and return.
*/
if (crypt == ZIO_CRYPT_OFF) {
if (proplist_has_encryption_props(props)) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Encryption must be turned on to set encryption "
"properties."));
goto out;
}
ret = 0;
goto out;
}
/*
* If we have a parent crypt it is valid to specify encryption alone.
* This will result in a child that is encrypted with the chosen
* encryption suite that will also inherit the parent's key. If
* the parent is not encrypted we need an encryption suite provided.
*/
if (pcrypt == ZIO_CRYPT_OFF && keylocation == NULL &&
keyformat == ZFS_KEYFORMAT_NONE) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Keyformat required for new encryption root."));
goto out;
}
/*
* Specifying a keylocation implies this will be a new encryption root.
* Check that a keyformat is also specified.
*/
if (keylocation != NULL && keyformat == ZFS_KEYFORMAT_NONE) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Keyformat required for new encryption root."));
goto out;
}
/* default to prompt if no keylocation is specified */
if (keyformat != ZFS_KEYFORMAT_NONE && keylocation == NULL) {
keylocation = "prompt";
ret = nvlist_add_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), keylocation);
if (ret != 0)
goto out;
}
/*
* If a local key is provided, this dataset will be a new
* encryption root. Populate the encryption params.
*/
if (keylocation != NULL) {
/*
* 'zfs recv -o keylocation=prompt' won't work because stdin
* is being used by the send stream, so we disallow it.
*/
if (!stdin_available && strcmp(keylocation, "prompt") == 0) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Cannot use "
"'prompt' keylocation because stdin is in use."));
goto out;
}
ret = populate_create_encryption_params_nvlists(hdl, NULL,
B_TRUE, keyformat, keylocation, props, &wkeydata,
&wkeylen);
if (ret != 0)
goto out;
}
if (pzhp != NULL)
zfs_close(pzhp);
*wkeydata_out = wkeydata;
*wkeylen_out = wkeylen;
return (0);
out:
if (pzhp != NULL)
zfs_close(pzhp);
if (wkeydata != NULL)
free(wkeydata);
*wkeydata_out = NULL;
*wkeylen_out = 0;
return (ret);
}
int
zfs_crypto_clone_check(libzfs_handle_t *hdl, zfs_handle_t *origin_zhp,
char *parent_name, nvlist_t *props)
{
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Encryption clone error"));
/*
* No encryption properties should be specified. They will all be
* inherited from the origin dataset.
*/
if (nvlist_exists(props, zfs_prop_to_name(ZFS_PROP_KEYFORMAT)) ||
nvlist_exists(props, zfs_prop_to_name(ZFS_PROP_KEYLOCATION)) ||
nvlist_exists(props, zfs_prop_to_name(ZFS_PROP_ENCRYPTION)) ||
nvlist_exists(props, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Encryption properties must inherit from origin dataset."));
return (EINVAL);
}
return (0);
}
typedef struct loadkeys_cbdata {
uint64_t cb_numfailed;
uint64_t cb_numattempted;
} loadkey_cbdata_t;
static int
load_keys_cb(zfs_handle_t *zhp, void *arg)
{
int ret;
boolean_t is_encroot;
loadkey_cbdata_t *cb = arg;
uint64_t keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
/* only attempt to load keys for encryption roots */
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
if (ret != 0 || !is_encroot)
goto out;
/* don't attempt to load already loaded keys */
if (keystatus == ZFS_KEYSTATUS_AVAILABLE)
goto out;
/* Attempt to load the key. Record status in cb. */
cb->cb_numattempted++;
ret = zfs_crypto_load_key(zhp, B_FALSE, NULL);
if (ret)
cb->cb_numfailed++;
out:
(void) zfs_iter_filesystems(zhp, load_keys_cb, cb);
zfs_close(zhp);
/* always return 0, since this function is best effort */
return (0);
}
/*
* This function is best effort. It attempts to load all the keys for the given
* filesystem and all of its children.
*/
int
zfs_crypto_attempt_load_keys(libzfs_handle_t *hdl, char *fsname)
{
int ret;
zfs_handle_t *zhp = NULL;
loadkey_cbdata_t cb = { 0 };
zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
ret = ENOENT;
goto error;
}
ret = load_keys_cb(zfs_handle_dup(zhp), &cb);
if (ret)
goto error;
(void) printf(gettext("%llu / %llu keys successfully loaded\n"),
(u_longlong_t)(cb.cb_numattempted - cb.cb_numfailed),
(u_longlong_t)cb.cb_numattempted);
if (cb.cb_numfailed != 0) {
ret = -1;
goto error;
}
zfs_close(zhp);
return (0);
error:
if (zhp != NULL)
zfs_close(zhp);
return (ret);
}
int
zfs_crypto_load_key(zfs_handle_t *zhp, boolean_t noop, char *alt_keylocation)
{
int ret, attempts = 0;
char errbuf[1024];
uint64_t keystatus, iters = 0, salt = 0;
uint64_t keyformat = ZFS_KEYFORMAT_NONE;
char prop_keylocation[MAXNAMELEN];
char prop_encroot[MAXNAMELEN];
char *keylocation = NULL;
uint8_t *key_material = NULL, *key_data = NULL;
size_t key_material_len;
boolean_t is_encroot, can_retry = B_FALSE, correctible = B_FALSE;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Key load error"));
/* check that encryption is enabled for the pool */
if (!encryption_feature_is_enabled(zhp->zpool_hdl)) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Encryption feature not enabled."));
ret = EINVAL;
goto error;
}
/* Fetch the keyformat. Check that the dataset is encrypted. */
keyformat = zfs_prop_get_int(zhp, ZFS_PROP_KEYFORMAT);
if (keyformat == ZFS_KEYFORMAT_NONE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is not encrypted."), zfs_get_name(zhp));
ret = EINVAL;
goto error;
}
/*
* Fetch the key location. Check that we are working with an
* encryption root.
*/
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, prop_encroot);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Failed to get encryption root for '%s'."),
zfs_get_name(zhp));
goto error;
} else if (!is_encroot) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Keys must be loaded for encryption root of '%s' (%s)."),
zfs_get_name(zhp), prop_encroot);
ret = EINVAL;
goto error;
}
/*
* if the caller has elected to override the keylocation property
* use that instead
*/
if (alt_keylocation != NULL) {
keylocation = alt_keylocation;
} else {
ret = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION, prop_keylocation,
sizeof (prop_keylocation), NULL, NULL, 0, B_TRUE);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Failed to get keylocation for '%s'."),
zfs_get_name(zhp));
goto error;
}
keylocation = prop_keylocation;
}
/* check that the key is unloaded unless this is a noop */
if (!noop) {
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus == ZFS_KEYSTATUS_AVAILABLE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key already loaded for '%s'."), zfs_get_name(zhp));
ret = EEXIST;
goto error;
}
}
/* passphrase formats require a salt and pbkdf2_iters property */
if (keyformat == ZFS_KEYFORMAT_PASSPHRASE) {
salt = zfs_prop_get_int(zhp, ZFS_PROP_PBKDF2_SALT);
iters = zfs_prop_get_int(zhp, ZFS_PROP_PBKDF2_ITERS);
}
try_again:
/* Fetching and deriving the key are correctable errors. Set the flag. */
correctible = B_TRUE;
/* get key material from key format and location */
ret = get_key_material(zhp->zfs_hdl, B_FALSE, B_FALSE, keyformat,
keylocation, zfs_get_name(zhp), &key_material, &key_material_len,
&can_retry);
if (ret != 0)
goto error;
/* derive a key from the key material */
ret = derive_key(zhp->zfs_hdl, keyformat, iters, key_material,
key_material_len, salt, &key_data);
if (ret != 0)
goto error;
correctible = B_FALSE;
/* pass the wrapping key and noop flag to the ioctl */
ret = lzc_load_key(zhp->zfs_name, noop, key_data, WRAPPING_KEY_LEN);
if (ret != 0) {
switch (ret) {
case EPERM:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Permission denied."));
break;
case EINVAL:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Invalid parameters provided for dataset %s."),
zfs_get_name(zhp));
break;
case EEXIST:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key already loaded for '%s'."), zfs_get_name(zhp));
break;
case EBUSY:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is busy."), zfs_get_name(zhp));
break;
case EACCES:
correctible = B_TRUE;
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Incorrect key provided for '%s'."),
zfs_get_name(zhp));
break;
}
goto error;
}
free(key_material);
free(key_data);
return (0);
error:
zfs_error(zhp->zfs_hdl, EZFS_CRYPTOFAILED, errbuf);
if (key_material != NULL) {
free(key_material);
key_material = NULL;
}
if (key_data != NULL) {
free(key_data);
key_data = NULL;
}
/*
* Here we decide if it is ok to allow the user to retry entering their
* key. The can_retry flag will be set if the user is entering their
* key from an interactive prompt. The correctable flag will only be
* set if an error that occurred could be corrected by retrying. Both
* flags are needed to allow the user to attempt key entry again.
*/
attempts++;
if (can_retry && correctible && attempts < MAX_KEY_PROMPT_ATTEMPTS)
goto try_again;
return (ret);
}
int
zfs_crypto_unload_key(zfs_handle_t *zhp)
{
int ret;
char errbuf[1024];
char prop_encroot[MAXNAMELEN];
uint64_t keystatus, keyformat;
boolean_t is_encroot;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Key unload error"));
/* check that encryption is enabled for the pool */
if (!encryption_feature_is_enabled(zhp->zpool_hdl)) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Encryption feature not enabled."));
ret = EINVAL;
goto error;
}
/* Fetch the keyformat. Check that the dataset is encrypted. */
keyformat = zfs_prop_get_int(zhp, ZFS_PROP_KEYFORMAT);
if (keyformat == ZFS_KEYFORMAT_NONE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is not encrypted."), zfs_get_name(zhp));
ret = EINVAL;
goto error;
}
/*
* Fetch the key location. Check that we are working with an
* encryption root.
*/
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, prop_encroot);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Failed to get encryption root for '%s'."),
zfs_get_name(zhp));
goto error;
} else if (!is_encroot) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Keys must be unloaded for encryption root of '%s' (%s)."),
zfs_get_name(zhp), prop_encroot);
ret = EINVAL;
goto error;
}
/* check that the key is loaded */
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key already unloaded for '%s'."), zfs_get_name(zhp));
ret = EACCES;
goto error;
}
/* call the ioctl */
ret = lzc_unload_key(zhp->zfs_name);
if (ret != 0) {
switch (ret) {
case EPERM:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Permission denied."));
break;
case EACCES:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key already unloaded for '%s'."),
zfs_get_name(zhp));
break;
case EBUSY:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is busy."), zfs_get_name(zhp));
break;
}
zfs_error(zhp->zfs_hdl, EZFS_CRYPTOFAILED, errbuf);
}
return (ret);
error:
zfs_error(zhp->zfs_hdl, EZFS_CRYPTOFAILED, errbuf);
return (ret);
}
static int
zfs_crypto_verify_rewrap_nvlist(zfs_handle_t *zhp, nvlist_t *props,
nvlist_t **props_out, char *errbuf)
{
int ret;
nvpair_t *elem = NULL;
zfs_prop_t prop;
nvlist_t *new_props = NULL;
new_props = fnvlist_alloc();
/*
* loop through all provided properties, we should only have
* keyformat, keylocation and pbkdf2iters. The actual validation of
* values is done by zfs_valid_proplist().
*/
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
prop = zfs_name_to_prop(propname);
switch (prop) {
case ZFS_PROP_PBKDF2_ITERS:
case ZFS_PROP_KEYFORMAT:
case ZFS_PROP_KEYLOCATION:
break;
default:
ret = EINVAL;
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Only keyformat, keylocation and pbkdf2iters may "
"be set with this command."));
goto error;
}
}
new_props = zfs_valid_proplist(zhp->zfs_hdl, zhp->zfs_type, props,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED), NULL, zhp->zpool_hdl,
B_TRUE, errbuf);
if (new_props == NULL) {
ret = EINVAL;
goto error;
}
*props_out = new_props;
return (0);
error:
nvlist_free(new_props);
*props_out = NULL;
return (ret);
}
int
zfs_crypto_rewrap(zfs_handle_t *zhp, nvlist_t *raw_props, boolean_t inheritkey)
{
int ret;
char errbuf[1024];
boolean_t is_encroot;
nvlist_t *props = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
dcp_cmd_t cmd = (inheritkey) ? DCP_CMD_INHERIT : DCP_CMD_NEW_KEY;
uint64_t crypt, pcrypt, keystatus, pkeystatus;
uint64_t keyformat = ZFS_KEYFORMAT_NONE;
zfs_handle_t *pzhp = NULL;
char *keylocation = NULL;
char origin_name[MAXNAMELEN];
char prop_keylocation[MAXNAMELEN];
char parent_name[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Key change error"));
/* check that encryption is enabled for the pool */
if (!encryption_feature_is_enabled(zhp->zpool_hdl)) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Encryption feature not enabled."));
ret = EINVAL;
goto error;
}
/* get crypt from dataset */
crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
if (crypt == ZIO_CRYPT_OFF) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Dataset not encrypted."));
ret = EINVAL;
goto error;
}
/* get the encryption root of the dataset */
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Failed to get encryption root for '%s'."),
zfs_get_name(zhp));
goto error;
}
/* Clones use their origin's key and cannot rewrap it */
ret = zfs_prop_get(zhp, ZFS_PROP_ORIGIN, origin_name,
sizeof (origin_name), NULL, NULL, 0, B_TRUE);
if (ret == 0 && strcmp(origin_name, "") != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Keys cannot be changed on clones."));
ret = EINVAL;
goto error;
}
/*
* If the user wants to use the inheritkey variant of this function
* we don't need to collect any crypto arguments.
*/
if (!inheritkey) {
/* validate the provided properties */
ret = zfs_crypto_verify_rewrap_nvlist(zhp, raw_props, &props,
errbuf);
if (ret != 0)
goto error;
/*
* Load keyformat and keylocation from the nvlist. Fetch from
* the dataset properties if not specified.
*/
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), &keyformat);
(void) nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
if (is_encroot) {
/*
* If this is already an encryption root, just keep
* any properties not set by the user.
*/
if (keyformat == ZFS_KEYFORMAT_NONE) {
keyformat = zfs_prop_get_int(zhp,
ZFS_PROP_KEYFORMAT);
ret = nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT),
keyformat);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl,
dgettext(TEXT_DOMAIN, "Failed to "
"get existing keyformat "
"property."));
goto error;
}
}
if (keylocation == NULL) {
ret = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION,
prop_keylocation, sizeof (prop_keylocation),
NULL, NULL, 0, B_TRUE);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl,
dgettext(TEXT_DOMAIN, "Failed to "
"get existing keylocation "
"property."));
goto error;
}
keylocation = prop_keylocation;
}
} else {
/* need a new key for non-encryption roots */
if (keyformat == ZFS_KEYFORMAT_NONE) {
ret = EINVAL;
zfs_error_aux(zhp->zfs_hdl,
dgettext(TEXT_DOMAIN, "Keyformat required "
"for new encryption root."));
goto error;
}
/* default to prompt if no keylocation is specified */
if (keylocation == NULL) {
keylocation = "prompt";
ret = nvlist_add_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
keylocation);
if (ret != 0)
goto error;
}
}
/* fetch the new wrapping key and associated properties */
ret = populate_create_encryption_params_nvlists(zhp->zfs_hdl,
zhp, B_TRUE, keyformat, keylocation, props, &wkeydata,
&wkeylen);
if (ret != 0)
goto error;
} else {
/* check that zhp is an encryption root */
if (!is_encroot) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key inheritting can only be performed on "
"encryption roots."));
ret = EINVAL;
goto error;
}
/* get the parent's name */
ret = zfs_parent_name(zhp, parent_name, sizeof (parent_name));
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Root dataset cannot inherit key."));
ret = EINVAL;
goto error;
}
/* get a handle to the parent */
pzhp = make_dataset_handle(zhp->zfs_hdl, parent_name);
if (pzhp == NULL) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Failed to lookup parent."));
ret = ENOENT;
goto error;
}
/* parent must be encrypted */
pcrypt = zfs_prop_get_int(pzhp, ZFS_PROP_ENCRYPTION);
if (pcrypt == ZIO_CRYPT_OFF) {
zfs_error_aux(pzhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Parent must be encrypted."));
ret = EINVAL;
goto error;
}
/* check that the parent's key is loaded */
pkeystatus = zfs_prop_get_int(pzhp, ZFS_PROP_KEYSTATUS);
if (pkeystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
zfs_error_aux(pzhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Parent key must be loaded."));
ret = EACCES;
goto error;
}
}
/* check that the key is loaded */
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key must be loaded."));
ret = EACCES;
goto error;
}
/* call the ioctl */
ret = lzc_change_key(zhp->zfs_name, cmd, props, wkeydata, wkeylen);
if (ret != 0) {
switch (ret) {
case EPERM:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Permission denied."));
break;
case EINVAL:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Invalid properties for key change."));
break;
case EACCES:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key is not currently loaded."));
break;
}
zfs_error(zhp->zfs_hdl, EZFS_CRYPTOFAILED, errbuf);
}
if (pzhp != NULL)
zfs_close(pzhp);
if (props != NULL)
nvlist_free(props);
if (wkeydata != NULL)
free(wkeydata);
return (ret);
error:
if (pzhp != NULL)
zfs_close(pzhp);
if (props != NULL)
nvlist_free(props);
if (wkeydata != NULL)
free(wkeydata);
zfs_error(zhp->zfs_hdl, EZFS_CRYPTOFAILED, errbuf);
return (ret);
}
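/*
 * Illustrative usage sketch (not part of the patch above): how a libzfs
 * consumer drives the key-load path in this file.  zfs_crypto_load_key()
 * prompts and retries internally as described in its comments; the dataset
 * name and error handling here are placeholders.
 */
#include <libzfs.h>

static int
example_load_key(const char *dsname)
{
        libzfs_handle_t *hdl = libzfs_init();
        if (hdl == NULL)
                return (1);

        zfs_handle_t *zhp = zfs_open(hdl, dsname,
            ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
        if (zhp == NULL) {
                libzfs_fini(hdl);
                return (1);
        }

        /* B_FALSE: actually load the key; NULL: honor the keylocation property */
        int err = zfs_crypto_load_key(zhp, B_FALSE, NULL);

        zfs_close(zhp);
        libzfs_fini(hdl);
        return (err != 0);
}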
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c b/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c
index 99e352dd4883..0e3198d9c856 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c
@@ -1,5568 +1,5568 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2019 Joyent, Inc.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012 DEY Storage Systems, Inc. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright 2017-2018 RackTop Systems.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021 Matt Fiddaman
*/
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <zone.h>
#include <fcntl.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <pwd.h>
#include <grp.h>
#include <ucred.h>
#ifdef HAVE_IDMAP
#include <idmap.h>
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/dsl_crypt.h>
#include <libzfs.h>
#include <libzutil.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_deleg.h"
static int userquota_propname_decode(const char *propname, boolean_t zoned,
zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp);
/*
* Given a single type (not a mask of types), return the type in a human
* readable form.
*/
const char *
zfs_type_to_name(zfs_type_t type)
{
switch (type) {
case ZFS_TYPE_FILESYSTEM:
return (dgettext(TEXT_DOMAIN, "filesystem"));
case ZFS_TYPE_SNAPSHOT:
return (dgettext(TEXT_DOMAIN, "snapshot"));
case ZFS_TYPE_VOLUME:
return (dgettext(TEXT_DOMAIN, "volume"));
case ZFS_TYPE_POOL:
return (dgettext(TEXT_DOMAIN, "pool"));
case ZFS_TYPE_BOOKMARK:
return (dgettext(TEXT_DOMAIN, "bookmark"));
default:
assert(!"unhandled zfs_type_t");
}
return (NULL);
}
/*
* Validate a ZFS path. This is used even before trying to open the dataset, to
* provide a more meaningful error message. We call zfs_error_aux() to
* explain exactly why the name was not valid.
*/
int
zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type,
boolean_t modifying)
{
namecheck_err_t why;
char what;
if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshot delimiter '@' is not expected here"));
return (0);
}
if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing '@' delimiter in snapshot name"));
return (0);
}
if (!(type & ZFS_TYPE_BOOKMARK) && strchr(path, '#') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bookmark delimiter '#' is not expected here"));
return (0);
}
if (type == ZFS_TYPE_BOOKMARK && strchr(path, '#') == NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing '#' delimiter in bookmark name"));
return (0);
}
if (modifying && strchr(path, '%') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid character %c in name"), '%');
return (0);
}
if (entity_namecheck(path, &why, &what) != 0) {
if (hdl != NULL) {
switch (why) {
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is too long"));
break;
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
break;
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component or misplaced '@'"
" or '#' delimiter in name"));
break;
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
break;
case NAME_ERR_INVALCHAR:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in name"), what);
break;
case NAME_ERR_MULTIPLE_DELIMITERS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' and/or '#' delimiters in "
"name"));
break;
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool doesn't begin with a letter"));
break;
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
break;
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"reserved disk name"));
break;
case NAME_ERR_SELF_REF:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"self reference, '.' is found in name"));
break;
case NAME_ERR_PARENT_REF:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent reference, '..' is found in name"));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"(%d) not defined"), why);
break;
}
}
return (0);
}
return (-1);
}
int
zfs_name_valid(const char *name, zfs_type_t type)
{
if (type == ZFS_TYPE_POOL)
return (zpool_name_valid(NULL, B_FALSE, name));
return (zfs_validate_name(NULL, name, type, B_FALSE));
}
/*
* This function takes the raw DSL properties, and filters out the user-defined
* properties into a separate nvlist.
*/
static nvlist_t *
process_user_props(zfs_handle_t *zhp, nvlist_t *props)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvpair_t *elem;
nvlist_t *propval;
nvlist_t *nvl;
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
if (!zfs_prop_user(nvpair_name(elem)))
continue;
verify(nvpair_value_nvlist(elem, &propval) == 0);
if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) {
nvlist_free(nvl);
(void) no_memory(hdl);
return (NULL);
}
}
return (nvl);
}
static zpool_handle_t *
zpool_add_handle(zfs_handle_t *zhp, const char *pool_name)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zpool_handle_t *zph;
if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) {
if (hdl->libzfs_pool_handles != NULL)
zph->zpool_next = hdl->libzfs_pool_handles;
hdl->libzfs_pool_handles = zph;
}
return (zph);
}
static zpool_handle_t *
zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zpool_handle_t *zph = hdl->libzfs_pool_handles;
while ((zph != NULL) &&
(strncmp(pool_name, zpool_get_name(zph), len) != 0))
zph = zph->zpool_next;
return (zph);
}
/*
* Returns a handle to the pool that contains the provided dataset.
* If a handle to that pool already exists then that handle is returned.
* Otherwise, a new handle is created and added to the list of handles.
*/
static zpool_handle_t *
zpool_handle(zfs_handle_t *zhp)
{
char *pool_name;
int len;
zpool_handle_t *zph;
len = strcspn(zhp->zfs_name, "/@#") + 1;
pool_name = zfs_alloc(zhp->zfs_hdl, len);
(void) strlcpy(pool_name, zhp->zfs_name, len);
zph = zpool_find_handle(zhp, pool_name, len);
if (zph == NULL)
zph = zpool_add_handle(zhp, pool_name);
free(pool_name);
return (zph);
}
void
zpool_free_handles(libzfs_handle_t *hdl)
{
zpool_handle_t *next, *zph = hdl->libzfs_pool_handles;
while (zph != NULL) {
next = zph->zpool_next;
zpool_close(zph);
zph = next;
}
hdl->libzfs_pool_handles = NULL;
}
/*
* Utility function to gather stats (objset and zpl) for the given object.
*/
static int
get_stats_ioctl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
(void) strlcpy(zc->zc_name, zhp->zfs_name, sizeof (zc->zc_name));
while (zfs_ioctl(hdl, ZFS_IOC_OBJSET_STATS, zc) != 0) {
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, zc) != 0) {
return (-1);
}
} else {
return (-1);
}
}
return (0);
}
/*
* Utility function to get the received properties of the given object.
*/
static int
get_recvd_props_ioctl(zfs_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *recvdprops;
zfs_cmd_t zc = {"\0"};
int err;
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
while (zfs_ioctl(hdl, ZFS_IOC_OBJSET_RECVD_PROPS, &zc) != 0) {
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
return (-1);
}
} else {
zcmd_free_nvlists(&zc);
return (-1);
}
}
err = zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &recvdprops);
zcmd_free_nvlists(&zc);
if (err != 0)
return (-1);
nvlist_free(zhp->zfs_recvd_props);
zhp->zfs_recvd_props = recvdprops;
return (0);
}
static int
put_stats_zhdl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
nvlist_t *allprops, *userprops;
zhp->zfs_dmustats = zc->zc_objset_stats; /* structure assignment */
if (zcmd_read_dst_nvlist(zhp->zfs_hdl, zc, &allprops) != 0) {
return (-1);
}
/*
* XXX Why do we store the user props separately, in addition to
* storing them in zfs_props?
*/
if ((userprops = process_user_props(zhp, allprops)) == NULL) {
nvlist_free(allprops);
return (-1);
}
nvlist_free(zhp->zfs_props);
nvlist_free(zhp->zfs_user_props);
zhp->zfs_props = allprops;
zhp->zfs_user_props = userprops;
return (0);
}
static int
get_stats(zfs_handle_t *zhp)
{
int rc = 0;
zfs_cmd_t zc = {"\0"};
if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
return (-1);
if (get_stats_ioctl(zhp, &zc) != 0)
rc = -1;
else if (put_stats_zhdl(zhp, &zc) != 0)
rc = -1;
zcmd_free_nvlists(&zc);
return (rc);
}
/*
* Refresh the properties currently stored in the handle.
*/
void
zfs_refresh_properties(zfs_handle_t *zhp)
{
(void) get_stats(zhp);
}
/*
* Makes a handle from the given dataset name. Used by zfs_open() and
* zfs_iter_* to create child handles on the fly.
*/
static int
make_dataset_handle_common(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
if (put_stats_zhdl(zhp, zc) != 0)
return (-1);
/*
* We've managed to open the dataset and gather statistics. Determine
* the high-level type.
*/
if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
zhp->zfs_head_type = ZFS_TYPE_VOLUME;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_OTHER)
return (-1);
else
abort();
if (zhp->zfs_dmustats.dds_is_snapshot)
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
zhp->zfs_type = ZFS_TYPE_VOLUME;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
zhp->zfs_type = ZFS_TYPE_FILESYSTEM;
else
abort(); /* we should never see any other types */
if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL)
return (-1);
return (0);
}
zfs_handle_t *
make_dataset_handle(libzfs_handle_t *hdl, const char *path)
{
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) {
free(zhp);
return (NULL);
}
if (get_stats_ioctl(zhp, &zc) == -1) {
zcmd_free_nvlists(&zc);
free(zhp);
return (NULL);
}
if (make_dataset_handle_common(zhp, &zc) == -1) {
free(zhp);
zhp = NULL;
}
zcmd_free_nvlists(&zc);
return (zhp);
}
zfs_handle_t *
make_dataset_handle_zc(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
if (make_dataset_handle_common(zhp, zc) == -1) {
free(zhp);
return (NULL);
}
return (zhp);
}
zfs_handle_t *
make_dataset_simple_handle_zc(zfs_handle_t *pzhp, zfs_cmd_t *zc)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = pzhp->zfs_hdl;
(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
zhp->zfs_head_type = pzhp->zfs_type;
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
zhp->zpool_hdl = zpool_handle(zhp);
return (zhp);
}
zfs_handle_t *
zfs_handle_dup(zfs_handle_t *zhp_orig)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = zhp_orig->zfs_hdl;
zhp->zpool_hdl = zhp_orig->zpool_hdl;
(void) strlcpy(zhp->zfs_name, zhp_orig->zfs_name,
sizeof (zhp->zfs_name));
zhp->zfs_type = zhp_orig->zfs_type;
zhp->zfs_head_type = zhp_orig->zfs_head_type;
zhp->zfs_dmustats = zhp_orig->zfs_dmustats;
if (zhp_orig->zfs_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_props, &zhp->zfs_props, 0) != 0) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
if (zhp_orig->zfs_user_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_user_props,
&zhp->zfs_user_props, 0) != 0) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
if (zhp_orig->zfs_recvd_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_recvd_props,
&zhp->zfs_recvd_props, 0)) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
zhp->zfs_mntcheck = zhp_orig->zfs_mntcheck;
if (zhp_orig->zfs_mntopts != NULL) {
zhp->zfs_mntopts = zfs_strdup(zhp_orig->zfs_hdl,
zhp_orig->zfs_mntopts);
}
zhp->zfs_props_table = zhp_orig->zfs_props_table;
return (zhp);
}
boolean_t
zfs_bookmark_exists(const char *path)
{
nvlist_t *bmarks;
nvlist_t *props;
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *bmark_name;
char *pound;
int err;
boolean_t rv;
(void) strlcpy(fsname, path, sizeof (fsname));
pound = strchr(fsname, '#');
if (pound == NULL)
return (B_FALSE);
*pound = '\0';
bmark_name = pound + 1;
props = fnvlist_alloc();
err = lzc_get_bookmarks(fsname, props, &bmarks);
nvlist_free(props);
if (err != 0) {
nvlist_free(bmarks);
return (B_FALSE);
}
rv = nvlist_exists(bmarks, bmark_name);
nvlist_free(bmarks);
return (rv);
}
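/*
 * A small sketch of zfs_bookmark_exists() above: the caller passes the full
 * "fs#bookmark" string and the '#' split happens internally.  The bookmark
 * name used here is a hypothetical example.
 */
static void
example_check_bookmark(void)
{
	const char *bmark = "tank/data#before-upgrade";

	(void) printf("%s %s\n", bmark,
	    zfs_bookmark_exists(bmark) ? "exists" : "not found");
}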
zfs_handle_t *
make_bookmark_handle(zfs_handle_t *parent, const char *path,
nvlist_t *bmark_props)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
/* Fill in the name. */
zhp->zfs_hdl = parent->zfs_hdl;
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
/* Set the property lists. */
if (nvlist_dup(bmark_props, &zhp->zfs_props, 0) != 0) {
free(zhp);
return (NULL);
}
/* Set the types. */
zhp->zfs_head_type = parent->zfs_head_type;
zhp->zfs_type = ZFS_TYPE_BOOKMARK;
if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL) {
nvlist_free(zhp->zfs_props);
free(zhp);
return (NULL);
}
return (zhp);
}
struct zfs_open_bookmarks_cb_data {
const char *path;
zfs_handle_t *zhp;
};
static int
zfs_open_bookmarks_cb(zfs_handle_t *zhp, void *data)
{
struct zfs_open_bookmarks_cb_data *dp = data;
/*
* Is it the one we are looking for?
*/
if (strcmp(dp->path, zfs_get_name(zhp)) == 0) {
/*
* We found it. Save it and let the caller know we are done.
*/
dp->zhp = zhp;
return (EEXIST);
}
/*
* Not found. Close the handle and ask for another one.
*/
zfs_close(zhp);
return (0);
}
/*
* Opens the given snapshot, bookmark, filesystem, or volume. The 'types'
* argument is a mask of acceptable types. The function will print an
* appropriate error message and return NULL if it can't be opened.
*/
zfs_handle_t *
zfs_open(libzfs_handle_t *hdl, const char *path, int types)
{
zfs_handle_t *zhp;
char errbuf[1024];
char *bookp;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);
/*
* Validate the name before we even try to open it.
*/
if (!zfs_validate_name(hdl, path, types, B_FALSE)) {
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
return (NULL);
}
/*
* Bookmarks need to be handled separately.
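Placeholder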
*/
bookp = strchr(path, '#');
if (bookp == NULL) {
/*
* Try to get stats for the dataset, which will tell us if it
* exists.
*/
errno = 0;
if ((zhp = make_dataset_handle(hdl, path)) == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
return (NULL);
}
} else {
char dsname[ZFS_MAX_DATASET_NAME_LEN];
zfs_handle_t *pzhp;
struct zfs_open_bookmarks_cb_data cb_data = {path, NULL};
/*
* We need to cut out '#' and everything after '#'
* to get the parent dataset name only.
*/
assert(bookp - path < sizeof (dsname));
(void) strncpy(dsname, path, bookp - path);
dsname[bookp - path] = '\0';
/*
* Create handle for the parent dataset.
*/
errno = 0;
if ((pzhp = make_dataset_handle(hdl, dsname)) == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
return (NULL);
}
/*
* Iterate bookmarks to find the right one.
*/
errno = 0;
if ((zfs_iter_bookmarks(pzhp, zfs_open_bookmarks_cb,
&cb_data) == 0) && (cb_data.zhp == NULL)) {
(void) zfs_error(hdl, EZFS_NOENT, errbuf);
zfs_close(pzhp);
return (NULL);
}
if (cb_data.zhp == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
zfs_close(pzhp);
return (NULL);
}
zhp = cb_data.zhp;
/*
* Cleanup.
*/
zfs_close(pzhp);
}
if (!(types & zhp->zfs_type)) {
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
zfs_close(zhp);
return (NULL);
}
return (zhp);
}
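/*
 * A minimal open/close sketch for the interface documented above, using the
 * headers already included at the top of this file.  The dataset name
 * "tank/home" and the function name example_open_dataset() are hypothetical.
 * zfs_open() reports its own descriptive error through the libzfs handle, so
 * the caller only checks for NULL.
 */
static int
example_open_dataset(libzfs_handle_t *hdl)
{
	zfs_handle_t *zhp = zfs_open(hdl, "tank/home",
	    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);

	if (zhp == NULL)
		return (-1);

	(void) printf("opened %s (%s)\n", zfs_get_name(zhp),
	    zfs_type_to_name(zfs_get_type(zhp)));
	zfs_close(zhp);
	return (0);
}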
/*
* Release a ZFS handle. Nothing to do but free the associated memory.
*/
void
zfs_close(zfs_handle_t *zhp)
{
if (zhp->zfs_mntopts)
free(zhp->zfs_mntopts);
nvlist_free(zhp->zfs_props);
nvlist_free(zhp->zfs_user_props);
nvlist_free(zhp->zfs_recvd_props);
free(zhp);
}
typedef struct mnttab_node {
struct mnttab mtn_mt;
avl_node_t mtn_node;
} mnttab_node_t;
static int
libzfs_mnttab_cache_compare(const void *arg1, const void *arg2)
{
const mnttab_node_t *mtn1 = (const mnttab_node_t *)arg1;
const mnttab_node_t *mtn2 = (const mnttab_node_t *)arg2;
int rv;
rv = strcmp(mtn1->mtn_mt.mnt_special, mtn2->mtn_mt.mnt_special);
return (TREE_ISIGN(rv));
}
void
libzfs_mnttab_init(libzfs_handle_t *hdl)
{
pthread_mutex_init(&hdl->libzfs_mnttab_cache_lock, NULL);
assert(avl_numnodes(&hdl->libzfs_mnttab_cache) == 0);
avl_create(&hdl->libzfs_mnttab_cache, libzfs_mnttab_cache_compare,
sizeof (mnttab_node_t), offsetof(mnttab_node_t, mtn_node));
}
static int
libzfs_mnttab_update(libzfs_handle_t *hdl)
{
FILE *mnttab;
struct mnttab entry;
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
while (getmntent(mnttab, &entry) == 0) {
mnttab_node_t *mtn;
avl_index_t where;
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
mtn->mtn_mt.mnt_special = zfs_strdup(hdl, entry.mnt_special);
mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, entry.mnt_mountp);
mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, entry.mnt_fstype);
mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, entry.mnt_mntopts);
/* Exclude duplicate mounts */
if (avl_find(&hdl->libzfs_mnttab_cache, mtn, &where) != NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
continue;
}
avl_add(&hdl->libzfs_mnttab_cache, mtn);
}
(void) fclose(mnttab);
return (0);
}
void
libzfs_mnttab_fini(libzfs_handle_t *hdl)
{
void *cookie = NULL;
mnttab_node_t *mtn;
while ((mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie))
!= NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
}
avl_destroy(&hdl->libzfs_mnttab_cache);
(void) pthread_mutex_destroy(&hdl->libzfs_mnttab_cache_lock);
}
void
libzfs_mnttab_cache(libzfs_handle_t *hdl, boolean_t enable)
{
hdl->libzfs_mnttab_enable = enable;
}
int
libzfs_mnttab_find(libzfs_handle_t *hdl, const char *fsname,
struct mnttab *entry)
{
FILE *mnttab;
mnttab_node_t find;
mnttab_node_t *mtn;
int ret = ENOENT;
if (!hdl->libzfs_mnttab_enable) {
struct mnttab srch = { 0 };
if (avl_numnodes(&hdl->libzfs_mnttab_cache))
libzfs_mnttab_fini(hdl);
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
srch.mnt_special = (char *)fsname;
srch.mnt_fstype = MNTTYPE_ZFS;
ret = getmntany(mnttab, entry, &srch) ? ENOENT : 0;
(void) fclose(mnttab);
return (ret);
}
pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0) {
int error;
if ((error = libzfs_mnttab_update(hdl)) != 0) {
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
return (error);
}
}
find.mtn_mt.mnt_special = (char *)fsname;
mtn = avl_find(&hdl->libzfs_mnttab_cache, &find, NULL);
if (mtn) {
*entry = mtn->mtn_mt;
ret = 0;
}
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
return (ret);
}
void
libzfs_mnttab_add(libzfs_handle_t *hdl, const char *special,
const char *mountp, const char *mntopts)
{
mnttab_node_t *mtn;
pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
if (avl_numnodes(&hdl->libzfs_mnttab_cache) != 0) {
mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
mtn->mtn_mt.mnt_special = zfs_strdup(hdl, special);
mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, mountp);
mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, MNTTYPE_ZFS);
mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, mntopts);
/*
* Another thread may have already added this entry
* via libzfs_mnttab_update. If so we should skip it.
*/
if (avl_find(&hdl->libzfs_mnttab_cache, mtn, NULL) != NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
} else {
avl_add(&hdl->libzfs_mnttab_cache, mtn);
}
}
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
}
void
libzfs_mnttab_remove(libzfs_handle_t *hdl, const char *fsname)
{
mnttab_node_t find;
mnttab_node_t *ret;
pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
find.mtn_mt.mnt_special = (char *)fsname;
if ((ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL))
!= NULL) {
avl_remove(&hdl->libzfs_mnttab_cache, ret);
free(ret->mtn_mt.mnt_special);
free(ret->mtn_mt.mnt_mountp);
free(ret->mtn_mt.mnt_fstype);
free(ret->mtn_mt.mnt_mntopts);
free(ret);
}
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
}
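/*
 * Sketch of how a caller might use the mnttab cache above when issuing many
 * mount-state queries.  libzfs_mnttab_cache(hdl, B_TRUE) makes
 * libzfs_mnttab_find() parse MNTTAB once into the AVL cache; turning it back
 * off makes the next lookup discard the cache and re-read MNTTAB.  The
 * callback and function names are hypothetical.
 */
static int
example_print_mounted(zfs_handle_t *zhp, void *arg)
{
	(void) arg;
	if (zfs_is_mounted(zhp, NULL))
		(void) printf("%s is mounted\n", zfs_get_name(zhp));
	zfs_close(zhp);
	return (0);
}

static void
example_walk_mounted(libzfs_handle_t *hdl)
{
	libzfs_mnttab_cache(hdl, B_TRUE);	/* cache MNTTAB lookups */
	(void) zfs_iter_root(hdl, example_print_mounted, NULL);
	libzfs_mnttab_cache(hdl, B_FALSE);	/* back to uncached lookups */
}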
int
zfs_spa_version(zfs_handle_t *zhp, int *spa_version)
{
zpool_handle_t *zpool_handle = zhp->zpool_hdl;
if (zpool_handle == NULL)
return (-1);
*spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
return (0);
}
/*
* The choice of reservation property depends on the SPA version.
*/
static int
zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop)
{
int spa_version;
if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
if (spa_version >= SPA_VERSION_REFRESERVATION)
*resv_prop = ZFS_PROP_REFRESERVATION;
else
*resv_prop = ZFS_PROP_RESERVATION;
return (0);
}
/*
* Given an nvlist of properties to set, validates that they are correct, and
* parses any numeric properties (index, boolean, etc) if they are specified as
* strings.
*/
nvlist_t *
zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl,
uint64_t zoned, zfs_handle_t *zhp, zpool_handle_t *zpool_hdl,
boolean_t key_params_ok, const char *errbuf)
{
nvpair_t *elem;
uint64_t intval;
char *strval;
zfs_prop_t prop;
nvlist_t *ret;
int chosen_normal = -1;
int chosen_utf = -1;
if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
/*
* Make sure this property is valid and applies to this type.
*/
elem = NULL;
while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
const char *propname = nvpair_name(elem);
prop = zfs_name_to_prop(propname);
if (prop == ZPROP_INVAL && zfs_prop_user(propname)) {
/*
* This is a user property: make sure it's a
* string, and that it's less than ZAP_MAXNAMELEN.
*/
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property name '%s' is too long"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (nvlist_add_string(ret, propname, strval) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
}
/*
* Currently, only user properties can be modified on
* snapshots.
*/
if (type == ZFS_TYPE_SNAPSHOT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"this property can not be modified for snapshots"));
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
goto error;
}
if (prop == ZPROP_INVAL && zfs_prop_userquota(propname)) {
zfs_userquota_prop_t uqtype;
char *newpropname = NULL;
char domain[128];
uint64_t rid;
uint64_t valary[3];
int rc;
if (userquota_propname_decode(propname, zoned,
&uqtype, domain, sizeof (domain), &rid) != 0) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"'%s' has an invalid user/group name"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (uqtype != ZFS_PROP_USERQUOTA &&
uqtype != ZFS_PROP_GROUPQUOTA &&
uqtype != ZFS_PROP_USEROBJQUOTA &&
uqtype != ZFS_PROP_GROUPOBJQUOTA &&
uqtype != ZFS_PROP_PROJECTQUOTA &&
uqtype != ZFS_PROP_PROJECTOBJQUOTA) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY,
errbuf);
goto error;
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, "none") == 0) {
intval = 0;
} else if (zfs_nicestrtonum(hdl,
strval, &intval) != 0) {
(void) zfs_error(hdl,
EZFS_BADPROP, errbuf);
goto error;
}
} else if (nvpair_type(elem) ==
DATA_TYPE_UINT64) {
(void) nvpair_value_uint64(elem, &intval);
if (intval == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable "
"{user|group|project}quota"));
goto error;
}
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a number"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* Encode the prop name as
* userquota@<hex-rid>-domain, to make it easy
* for the kernel to decode.
*/
rc = asprintf(&newpropname, "%s%llx-%s",
zfs_userquota_prop_prefixes[uqtype],
(longlong_t)rid, domain);
if (rc == -1 || newpropname == NULL) {
(void) no_memory(hdl);
goto error;
}
valary[0] = uqtype;
valary[1] = rid;
valary[2] = intval;
if (nvlist_add_uint64_array(ret, newpropname,
valary, 3) != 0) {
free(newpropname);
(void) no_memory(hdl);
goto error;
}
free(newpropname);
continue;
} else if (prop == ZPROP_INVAL && zfs_prop_written(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (prop == ZPROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (!zfs_prop_valid_for_type(prop, type, B_FALSE)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' does not "
"apply to datasets of this type"), propname);
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
goto error;
}
if (zfs_prop_readonly(prop) &&
!(zfs_prop_setonce(prop) && zhp == NULL) &&
!(zfs_prop_encryption_key_param(prop) && key_params_ok)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, prop, type, ret,
&strval, &intval, errbuf) != 0)
goto error;
/*
* Perform some additional checks for specific properties.
*/
switch (prop) {
case ZFS_PROP_VERSION:
{
int version;
if (zhp == NULL)
break;
version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
if (intval < version) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Can not downgrade; already at version %u"),
version);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_VOLBLOCKSIZE:
case ZFS_PROP_RECORDSIZE:
{
int maxbs = SPA_MAXBLOCKSIZE;
char buf[64];
if (zpool_hdl != NULL) {
maxbs = zpool_get_prop_int(zpool_hdl,
ZPOOL_PROP_MAXBLOCKSIZE, NULL);
}
/*
* The value must be a power of two between
* SPA_MINBLOCKSIZE and maxbs.
*/
if (intval < SPA_MINBLOCKSIZE ||
intval > maxbs || !ISP2(intval)) {
zfs_nicebytes(maxbs, buf, sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be power of 2 from 512B "
"to %s"), propname, buf);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_SPECIAL_SMALL_BLOCKS:
{
int maxbs = SPA_OLD_MAXBLOCKSIZE;
char buf[64];
if (zpool_hdl != NULL) {
char state[64] = "";
maxbs = zpool_get_prop_int(zpool_hdl,
ZPOOL_PROP_MAXBLOCKSIZE, NULL);
/*
* Issue a warning but do not fail so that
* tests for settable properties succeed.
*/
if (zpool_prop_get_feature(zpool_hdl,
"feature@allocation_classes", state,
sizeof (state)) != 0 ||
strcmp(state, ZFS_FEATURE_ACTIVE) != 0) {
(void) fprintf(stderr, gettext(
"%s: property requires a special "
"device in the pool\n"), propname);
}
}
if (intval != 0 &&
(intval < SPA_MINBLOCKSIZE ||
intval > maxbs || !ISP2(intval))) {
zfs_nicebytes(maxbs, buf, sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid '%s=%llu' property: must be zero "
"or a power of 2 from 512B to %s"),
propname, (unsigned long long)intval, buf);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_MLSLABEL:
{
#ifdef HAVE_MLSLABEL
/*
* Verify the mlslabel string and convert to
* internal hex label string.
*/
m_label_t *new_sl;
char *hex = NULL; /* internal label string */
/* Default value is already OK. */
if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
break;
/* Verify the label can be converted to binary form */
if (((new_sl = m_label_alloc(MAC_LABEL)) == NULL) ||
(str_to_label(strval, &new_sl, MAC_LABEL,
L_NO_CORRECTION, NULL) == -1)) {
goto badlabel;
}
/* Now translate to hex internal label string */
if (label_to_str(new_sl, &hex, M_INTERNAL,
DEF_NAMES) != 0) {
if (hex)
free(hex);
goto badlabel;
}
m_label_free(new_sl);
/* If string is already in internal form, we're done. */
if (strcmp(strval, hex) == 0) {
free(hex);
break;
}
/* Replace the label string with the internal form. */
(void) nvlist_remove(ret, zfs_prop_to_name(prop),
DATA_TYPE_STRING);
verify(nvlist_add_string(ret, zfs_prop_to_name(prop),
hex) == 0);
free(hex);
break;
badlabel:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid mlslabel '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
m_label_free(new_sl); /* OK if null */
goto error;
#else
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"mlslabels are unsupported"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
#endif /* HAVE_MLSLABEL */
}
case ZFS_PROP_MOUNTPOINT:
{
namecheck_err_t why;
if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 ||
strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0)
break;
if (mountpoint_namecheck(strval, &why)) {
switch (why) {
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"'%s' must be an absolute path, "
"'none', or 'legacy'"), propname);
break;
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"component of '%s' is too long"),
propname);
break;
default:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"(%d) not defined"),
why);
break;
}
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
}
- /*FALLTHRU*/
+ /* FALLTHROUGH */
case ZFS_PROP_SHARESMB:
case ZFS_PROP_SHARENFS:
/*
* For the mountpoint and sharenfs or sharesmb
* properties, check if it can be set in a
* global/non-global zone based on
* the zoned property value:
*
* global zone non-global zone
* --------------------------------------------------
* zoned=on mountpoint (no) mountpoint (yes)
* sharenfs (no) sharenfs (no)
* sharesmb (no) sharesmb (no)
*
* zoned=off mountpoint (yes) N/A
* sharenfs (yes)
* sharesmb (yes)
*/
if (zoned) {
if (getzoneid() == GLOBAL_ZONEID) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set on "
"dataset in a non-global zone"),
propname);
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
} else if (prop == ZFS_PROP_SHARENFS ||
prop == ZFS_PROP_SHARESMB) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set in "
"a non-global zone"), propname);
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
}
} else if (getzoneid() != GLOBAL_ZONEID) {
/*
* If zoned property is 'off', this must be in
* a global zone. If not, something is wrong.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set while dataset "
"'zoned' property is set"), propname);
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
/*
* At this point, it is legitimate to set the
* property. Now we want to make sure that the
* property value is valid if it is sharenfs or sharesmb.
*/
if ((prop == ZFS_PROP_SHARENFS ||
prop == ZFS_PROP_SHARESMB) &&
strcmp(strval, "on") != 0 &&
strcmp(strval, "off") != 0) {
zfs_share_proto_t proto;
if (prop == ZFS_PROP_SHARESMB)
proto = PROTO_SMB;
else
proto = PROTO_NFS;
if (zfs_parse_options(strval, proto) != SA_OK) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set to invalid "
"options"), propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
break;
case ZFS_PROP_KEYLOCATION:
if (!zfs_prop_valid_keylocation(strval, B_FALSE)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid keylocation"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zhp != NULL) {
uint64_t crypt =
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
if (crypt == ZIO_CRYPT_OFF &&
strcmp(strval, "none") != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation must be 'none' "
"for unencrypted datasets"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
} else if (crypt != ZIO_CRYPT_OFF &&
strcmp(strval, "none") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation must not be 'none' "
"for encrypted datasets"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
break;
case ZFS_PROP_PBKDF2_ITERS:
if (intval < MIN_PBKDF2_ITERATIONS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"minimum pbkdf2 iterations is %u"),
MIN_PBKDF2_ITERATIONS);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZFS_PROP_UTF8ONLY:
chosen_utf = (int)intval;
break;
case ZFS_PROP_NORMALIZE:
chosen_normal = (int)intval;
break;
default:
break;
}
/*
* For changes to existing volumes, we have some additional
* checks to enforce.
*/
if (type == ZFS_TYPE_VOLUME && zhp != NULL) {
uint64_t blocksize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLBLOCKSIZE);
char buf[64];
switch (prop) {
case ZFS_PROP_VOLSIZE:
if (intval % blocksize != 0) {
zfs_nicebytes(blocksize, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a multiple of "
"volume block size (%s)"),
propname, buf);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
if (intval == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be zero"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
break;
default:
break;
}
}
/* check encryption properties */
if (zhp != NULL) {
int64_t crypt = zfs_prop_get_int(zhp,
ZFS_PROP_ENCRYPTION);
switch (prop) {
case ZFS_PROP_COPIES:
if (crypt != ZIO_CRYPT_OFF && intval > 2) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encrypted datasets cannot have "
"3 copies"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
break;
default:
break;
}
}
}
/*
* If normalization was chosen, but no UTF8 choice was made,
* enforce rejection of non-UTF8 names.
*
* If normalization was chosen, but rejecting non-UTF8 names
* was explicitly not chosen, it is an error.
*/
if (chosen_normal > 0 && chosen_utf < 0) {
if (nvlist_add_uint64(ret,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) {
(void) no_memory(hdl);
goto error;
}
} else if (chosen_normal > 0 && chosen_utf == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be set 'on' if normalization chosen"),
zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
return (ret);
error:
nvlist_free(ret);
return (NULL);
}
static int
zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
uint64_t old_volsize;
uint64_t new_volsize;
uint64_t old_reservation;
uint64_t new_reservation;
zfs_prop_t resv_prop;
nvlist_t *props;
zpool_handle_t *zph = zpool_handle(zhp);
/*
* If this is an existing volume, and someone is setting the volsize,
* make sure that it matches the reservation, or add it if necessary.
*/
old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
return (-1);
old_reservation = zfs_prop_get_int(zhp, resv_prop);
props = fnvlist_alloc();
fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));
if ((zvol_volsize_to_reservation(zph, old_volsize, props) !=
old_reservation) || nvlist_exists(nvl,
zfs_prop_to_name(resv_prop))) {
fnvlist_free(props);
return (0);
}
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
&new_volsize) != 0) {
fnvlist_free(props);
return (-1);
}
new_reservation = zvol_volsize_to_reservation(zph, new_volsize, props);
fnvlist_free(props);
if (nvlist_add_uint64(nvl, zfs_prop_to_name(resv_prop),
new_reservation) != 0) {
(void) no_memory(zhp->zfs_hdl);
return (-1);
}
return (1);
}
/*
* Helper for 'zfs {set|clone} refreservation=auto'. Must be called after
* zfs_valid_proplist(), as it is what sets the UINT64_MAX sentinel value.
* Return codes must match zfs_add_synthetic_resv().
*/
static int
zfs_fix_auto_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
uint64_t volsize;
uint64_t resvsize;
zfs_prop_t prop;
nvlist_t *props;
if (!ZFS_IS_VOLUME(zhp)) {
return (0);
}
if (zfs_which_resv_prop(zhp, &prop) != 0) {
return (-1);
}
if (prop != ZFS_PROP_REFRESERVATION) {
return (0);
}
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(prop), &resvsize) != 0) {
/* No value being set, so it can't be "auto" */
return (0);
}
if (resvsize != UINT64_MAX) {
/* Being set to a value other than "auto" */
return (0);
}
props = fnvlist_alloc();
fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
&volsize) != 0) {
volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
}
resvsize = zvol_volsize_to_reservation(zpool_handle(zhp), volsize,
props);
fnvlist_free(props);
(void) nvlist_remove_all(nvl, zfs_prop_to_name(prop));
if (nvlist_add_uint64(nvl, zfs_prop_to_name(prop), resvsize) != 0) {
(void) no_memory(zhp->zfs_hdl);
return (-1);
}
return (1);
}
static boolean_t
zfs_is_namespace_prop(zfs_prop_t prop)
{
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_SETUID:
case ZFS_PROP_READONLY:
case ZFS_PROP_XATTR:
case ZFS_PROP_NBMAND:
return (B_TRUE);
default:
return (B_FALSE);
}
}
/*
* Given a property name and value, set the property for the given dataset.
*/
int
zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
{
int ret = -1;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl = NULL;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zfs_name);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_string(nvl, propname, propval) != 0) {
(void) no_memory(hdl);
goto error;
}
ret = zfs_prop_set_list(zhp, nvl);
error:
nvlist_free(nvl);
return (ret);
}
/*
* Given an nvlist of property names and values, set the properties for the
* given dataset.
*/
int
zfs_prop_set_list(zfs_handle_t *zhp, nvlist_t *props)
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
prop_changelist_t **cls = NULL;
int cl_idx;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl;
int nvl_len = 0;
int added_resv = 0;
zfs_prop_t prop = 0;
nvpair_t *elem;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zfs_name);
if ((nvl = zfs_valid_proplist(hdl, zhp->zfs_type, props,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, zhp->zpool_hdl,
B_FALSE, errbuf)) == NULL)
goto error;
/*
* We have to check for any extra properties which need to be added
* before computing the length of the nvlist.
*/
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
if (zfs_name_to_prop(nvpair_name(elem)) == ZFS_PROP_VOLSIZE &&
(added_resv = zfs_add_synthetic_resv(zhp, nvl)) == -1) {
goto error;
}
}
if (added_resv != 1 &&
(added_resv = zfs_fix_auto_resv(zhp, nvl)) == -1) {
goto error;
}
/*
* Check how many properties we're setting and allocate an array to
* store changelist pointers for postfix().
*/
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem))
nvl_len++;
if ((cls = calloc(nvl_len, sizeof (prop_changelist_t *))) == NULL)
goto error;
cl_idx = 0;
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
prop = zfs_name_to_prop(nvpair_name(elem));
assert(cl_idx < nvl_len);
/*
* We don't want to unmount & remount the dataset when changing
* its canmount property to 'on' or 'noauto'. We only use
* the changelist logic to unmount when setting canmount=off.
*/
if (prop != ZFS_PROP_CANMOUNT ||
(fnvpair_value_uint64(elem) == ZFS_CANMOUNT_OFF &&
zfs_is_mounted(zhp, NULL))) {
cls[cl_idx] = changelist_gather(zhp, prop, 0, 0);
if (cls[cl_idx] == NULL)
goto error;
}
if (prop == ZFS_PROP_MOUNTPOINT &&
changelist_haszonedchild(cls[cl_idx])) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
ret = zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if (cls[cl_idx] != NULL &&
(ret = changelist_prefix(cls[cl_idx])) != 0)
goto error;
cl_idx++;
}
assert(cl_idx == nvl_len);
/*
* Execute the corresponding ioctl() to set this list of properties.
*/
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if ((ret = zcmd_write_src_nvlist(hdl, &zc, nvl)) != 0 ||
(ret = zcmd_alloc_dst_nvlist(hdl, &zc, 0)) != 0)
goto error;
ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
if (ret != 0) {
if (zc.zc_nvlist_dst_filled == B_FALSE) {
(void) zfs_standard_error(hdl, errno, errbuf);
goto error;
}
/* Get the list of unset properties back and report them. */
nvlist_t *errorprops = NULL;
if (zcmd_read_dst_nvlist(hdl, &zc, &errorprops) != 0)
goto error;
for (nvpair_t *elem = nvlist_next_nvpair(errorprops, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errorprops, elem)) {
prop = zfs_name_to_prop(nvpair_name(elem));
zfs_setprop_error(hdl, prop, errno, errbuf);
}
nvlist_free(errorprops);
if (added_resv && errno == ENOSPC) {
/* clean up the volsize property we tried to set */
uint64_t old_volsize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLSIZE);
nvlist_free(nvl);
nvl = NULL;
zcmd_free_nvlists(&zc);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
goto error;
if (nvlist_add_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_VOLSIZE),
old_volsize) != 0)
goto error;
if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0)
goto error;
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
}
} else {
for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
if (cls[cl_idx] != NULL) {
int clp_err = changelist_postfix(cls[cl_idx]);
if (clp_err != 0)
ret = clp_err;
}
}
if (ret == 0) {
/*
* Refresh the statistics so the new property
* value is reflected.
*/
(void) get_stats(zhp);
/*
* Remount the filesystem to propagate the change
* if one of the options handled by the generic
* Linux namespace layer has been modified.
*/
if (zfs_is_namespace_prop(prop) &&
zfs_is_mounted(zhp, NULL))
ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
}
}
error:
nvlist_free(nvl);
zcmd_free_nvlists(&zc);
if (cls != NULL) {
for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
if (cls[cl_idx] != NULL)
changelist_free(cls[cl_idx]);
}
free(cls);
}
return (ret);
}
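/*
 * Sketch of a multi-property update through zfs_prop_set_list() above.
 * String values such as "128K" are parsed by zfs_valid_proplist(), and all of
 * the properties are pushed down in a single ZFS_IOC_SET_PROP call.  The
 * particular property values are hypothetical examples.
 */
static int
example_set_props(zfs_handle_t *zhp)
{
	nvlist_t *props = fnvlist_alloc();
	int err;

	fnvlist_add_string(props, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
	    "128K");
	fnvlist_add_string(props, zfs_prop_to_name(ZFS_PROP_COMPRESSION),
	    "lz4");
	err = zfs_prop_set_list(zhp, props);
	fnvlist_free(props);
	return (err);
}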
/*
* Given a property, inherit the value from the parent dataset, or if received
* is TRUE, revert to the received value, if any.
*/
int
zfs_prop_inherit(zfs_handle_t *zhp, const char *propname, boolean_t received)
{
zfs_cmd_t zc = {"\0"};
int ret;
prop_changelist_t *cl;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
zfs_prop_t prop;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot inherit %s for '%s'"), propname, zhp->zfs_name);
zc.zc_cookie = received;
if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) {
/*
* For user properties, the amount of work we have to do is very
* small, so just do it here.
*/
if (!zfs_prop_user(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0)
return (zfs_standard_error(hdl, errno, errbuf));
(void) get_stats(zhp);
return (0);
}
/*
* Verify that this property is inheritable.
*/
if (zfs_prop_readonly(prop))
return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf));
if (!zfs_prop_inheritable(prop) && !received)
return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf));
/*
* Check to see if the value applies to this type
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
return (zfs_error(hdl, EZFS_PROPTYPE, errbuf));
/*
* Normalize the name, to get rid of shorthand abbreviations.
*/
propname = zfs_prop_to_name(prop);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID &&
zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is used in a non-global zone"));
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
/*
* Determine datasets which will be affected by this change, if any.
*/
if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL)
return (-1);
if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
ret = zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc)) != 0) {
return (zfs_standard_error(hdl, errno, errbuf));
} else {
if ((ret = changelist_postfix(cl)) != 0)
goto error;
/*
* Refresh the statistics so the new property is reflected.
*/
(void) get_stats(zhp);
/*
* Remount the filesystem to propagate the change
* if one of the options handled by the generic
* Linux namespace layer has been modified.
*/
if (zfs_is_namespace_prop(prop) &&
zfs_is_mounted(zhp, NULL))
ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
}
error:
changelist_free(cl);
return (ret);
}
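/*
 * Sketch of reverting a local property to its inherited value with
 * zfs_prop_inherit() above; passing B_TRUE for 'received' instead reverts to
 * the received value, as 'zfs inherit -S' does.  The function name is
 * hypothetical.
 */
static int
example_inherit_compression(zfs_handle_t *zhp)
{
	return (zfs_prop_inherit(zhp,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), B_FALSE));
}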
/*
* True DSL properties are stored in an nvlist. The following two functions
* extract them appropriately.
*/
uint64_t
getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
{
nvlist_t *nv;
uint64_t value;
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
verify(!zhp->zfs_props_table ||
zhp->zfs_props_table[prop] == B_TRUE);
value = zfs_prop_default_numeric(prop);
*source = "";
}
return (value);
}
static const char *
getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
{
nvlist_t *nv;
const char *value;
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
value = fnvlist_lookup_string(nv, ZPROP_VALUE);
(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
verify(!zhp->zfs_props_table ||
zhp->zfs_props_table[prop] == B_TRUE);
value = zfs_prop_default_string(prop);
*source = "";
}
return (value);
}
static boolean_t
zfs_is_recvd_props_mode(zfs_handle_t *zhp)
{
return (zhp->zfs_props == zhp->zfs_recvd_props);
}
static void
zfs_set_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie)
{
*cookie = (uint64_t)(uintptr_t)zhp->zfs_props;
zhp->zfs_props = zhp->zfs_recvd_props;
}
static void
zfs_unset_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie)
{
zhp->zfs_props = (nvlist_t *)(uintptr_t)*cookie;
*cookie = 0;
}
/*
* Internal function for getting a numeric property. Both zfs_prop_get() and
* zfs_prop_get_int() are built using this interface.
*
* Certain properties can be overridden using 'mount -o'. In this case, scan
* the contents of the /proc/self/mounts entry, searching for the
* appropriate options. If they differ from the on-disk values, report the
* current values and mark the source "temporary".
*/
static int
get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src,
char **source, uint64_t *val)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *zplprops = NULL;
struct mnttab mnt;
char *mntopt_on = NULL;
char *mntopt_off = NULL;
boolean_t received = zfs_is_recvd_props_mode(zhp);
*source = NULL;
/*
* If the property is being fetched for a snapshot, check whether
* the property is valid for the snapshot's head dataset type.
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT &&
!zfs_prop_valid_for_type(prop, zhp->zfs_head_type, B_TRUE)) {
*val = zfs_prop_default_numeric(prop);
return (-1);
}
switch (prop) {
case ZFS_PROP_ATIME:
mntopt_on = MNTOPT_ATIME;
mntopt_off = MNTOPT_NOATIME;
break;
case ZFS_PROP_RELATIME:
mntopt_on = MNTOPT_RELATIME;
mntopt_off = MNTOPT_NORELATIME;
break;
case ZFS_PROP_DEVICES:
mntopt_on = MNTOPT_DEVICES;
mntopt_off = MNTOPT_NODEVICES;
break;
case ZFS_PROP_EXEC:
mntopt_on = MNTOPT_EXEC;
mntopt_off = MNTOPT_NOEXEC;
break;
case ZFS_PROP_READONLY:
mntopt_on = MNTOPT_RO;
mntopt_off = MNTOPT_RW;
break;
case ZFS_PROP_SETUID:
mntopt_on = MNTOPT_SETUID;
mntopt_off = MNTOPT_NOSETUID;
break;
case ZFS_PROP_XATTR:
mntopt_on = MNTOPT_XATTR;
mntopt_off = MNTOPT_NOXATTR;
break;
case ZFS_PROP_NBMAND:
mntopt_on = MNTOPT_NBMAND;
mntopt_off = MNTOPT_NONBMAND;
break;
default:
break;
}
/*
* Because looking up the mount options is potentially expensive
* (iterating over all of /proc/self/mounts), we defer its
* calculation until we're looking up a property which requires
* its presence.
*/
if (!zhp->zfs_mntcheck &&
(mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) {
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
if (libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0) {
zhp->zfs_mntopts = zfs_strdup(hdl,
entry.mnt_mntopts);
if (zhp->zfs_mntopts == NULL)
return (-1);
}
zhp->zfs_mntcheck = B_TRUE;
}
if (zhp->zfs_mntopts == NULL)
mnt.mnt_mntopts = "";
else
mnt.mnt_mntopts = zhp->zfs_mntopts;
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_READONLY:
case ZFS_PROP_SETUID:
#ifndef __FreeBSD__
case ZFS_PROP_XATTR:
#endif
case ZFS_PROP_NBMAND:
*val = getprop_uint64(zhp, prop, source);
if (received)
break;
if (hasmntopt(&mnt, mntopt_on) && !*val) {
*val = B_TRUE;
if (src)
*src = ZPROP_SRC_TEMPORARY;
} else if (hasmntopt(&mnt, mntopt_off) && *val) {
*val = B_FALSE;
if (src)
*src = ZPROP_SRC_TEMPORARY;
}
break;
case ZFS_PROP_CANMOUNT:
case ZFS_PROP_VOLSIZE:
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
case ZFS_PROP_FILESYSTEM_COUNT:
case ZFS_PROP_SNAPSHOT_COUNT:
*val = getprop_uint64(zhp, prop, source);
if (*source == NULL) {
/* not default, must be local */
*source = zhp->zfs_name;
}
break;
case ZFS_PROP_MOUNTED:
*val = (zhp->zfs_mntopts != NULL);
break;
case ZFS_PROP_NUMCLONES:
*val = zhp->zfs_dmustats.dds_num_clones;
break;
case ZFS_PROP_VERSION:
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
case ZFS_PROP_CASE:
if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) {
zcmd_free_nvlists(&zc);
if (prop == ZFS_PROP_VERSION &&
zhp->zfs_type == ZFS_TYPE_VOLUME)
*val = zfs_prop_default_numeric(prop);
return (-1);
}
if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 ||
nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop),
val) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
nvlist_free(zplprops);
zcmd_free_nvlists(&zc);
break;
case ZFS_PROP_INCONSISTENT:
*val = zhp->zfs_dmustats.dds_inconsistent;
break;
case ZFS_PROP_REDACTED:
*val = zhp->zfs_dmustats.dds_redacted;
break;
default:
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
case PROP_TYPE_INDEX:
*val = getprop_uint64(zhp, prop, source);
/*
* If we tried to use a default value for a
* readonly property, it means that it was not
* present. Note this only applies to "truly"
* readonly properties, not set-once properties
* like volblocksize.
*/
if (zfs_prop_readonly(prop) &&
!zfs_prop_setonce(prop) &&
*source != NULL && (*source)[0] == '\0') {
*source = NULL;
return (-1);
}
break;
case PROP_TYPE_STRING:
default:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"cannot get non-numeric property"));
return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "internal error")));
}
}
return (0);
}
/*
* Calculate the source type, given the raw source string.
*/
static void
get_source(zfs_handle_t *zhp, zprop_source_t *srctype, char *source,
char *statbuf, size_t statlen)
{
if (statbuf == NULL ||
srctype == NULL || *srctype == ZPROP_SRC_TEMPORARY) {
return;
}
if (source == NULL) {
*srctype = ZPROP_SRC_NONE;
} else if (source[0] == '\0') {
*srctype = ZPROP_SRC_DEFAULT;
} else if (strstr(source, ZPROP_SOURCE_VAL_RECVD) != NULL) {
*srctype = ZPROP_SRC_RECEIVED;
} else {
if (strcmp(source, zhp->zfs_name) == 0) {
*srctype = ZPROP_SRC_LOCAL;
} else {
(void) strlcpy(statbuf, source, statlen);
*srctype = ZPROP_SRC_INHERITED;
}
}
}
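/*
 * Sketch of reading a property together with the source information that
 * get_source() above classifies (default, local, inherited, received, or
 * temporary).  ZFS_MAXPROPLEN and ZFS_MAX_DATASET_NAME_LEN are the buffer
 * size constants from libzfs.h; the function name is hypothetical.
 */
static void
example_show_compression_source(zfs_handle_t *zhp)
{
	char value[ZFS_MAXPROPLEN];
	char setpoint[ZFS_MAX_DATASET_NAME_LEN];
	zprop_source_t src;

	if (zfs_prop_get(zhp, ZFS_PROP_COMPRESSION, value, sizeof (value),
	    &src, setpoint, sizeof (setpoint), B_FALSE) != 0)
		return;

	if (src == ZPROP_SRC_INHERITED)
		(void) printf("compression=%s (inherited from %s)\n",
		    value, setpoint);
	else
		(void) printf("compression=%s\n", value);
}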
int
zfs_prop_get_recvd(zfs_handle_t *zhp, const char *propname, char *propbuf,
size_t proplen, boolean_t literal)
{
zfs_prop_t prop;
int err = 0;
if (zhp->zfs_recvd_props == NULL)
if (get_recvd_props_ioctl(zhp) != 0)
return (-1);
prop = zfs_name_to_prop(propname);
if (prop != ZPROP_INVAL) {
uint64_t cookie;
if (!nvlist_exists(zhp->zfs_recvd_props, propname))
return (-1);
zfs_set_recvd_props_mode(zhp, &cookie);
err = zfs_prop_get(zhp, prop, propbuf, proplen,
NULL, NULL, 0, literal);
zfs_unset_recvd_props_mode(zhp, &cookie);
} else {
nvlist_t *propval;
char *recvdval;
if (nvlist_lookup_nvlist(zhp->zfs_recvd_props,
propname, &propval) != 0)
return (-1);
verify(nvlist_lookup_string(propval, ZPROP_VALUE,
&recvdval) == 0);
(void) strlcpy(propbuf, recvdval, proplen);
}
return (err == 0 ? 0 : -1);
}
static int
get_clones_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
{
nvlist_t *value;
nvpair_t *pair;
value = zfs_get_clones_nvl(zhp);
if (value == NULL || nvlist_empty(value))
return (-1);
propbuf[0] = '\0';
for (pair = nvlist_next_nvpair(value, NULL); pair != NULL;
pair = nvlist_next_nvpair(value, pair)) {
if (propbuf[0] != '\0')
(void) strlcat(propbuf, ",", proplen);
(void) strlcat(propbuf, nvpair_name(pair), proplen);
}
return (0);
}
struct get_clones_arg {
uint64_t numclones;
nvlist_t *value;
const char *origin;
char buf[ZFS_MAX_DATASET_NAME_LEN];
};
static int
get_clones_cb(zfs_handle_t *zhp, void *arg)
{
struct get_clones_arg *gca = arg;
if (gca->numclones == 0) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, gca->buf, sizeof (gca->buf),
NULL, NULL, 0, B_TRUE) != 0)
goto out;
if (strcmp(gca->buf, gca->origin) == 0) {
fnvlist_add_boolean(gca->value, zfs_get_name(zhp));
gca->numclones--;
}
out:
(void) zfs_iter_children(zhp, get_clones_cb, gca);
zfs_close(zhp);
return (0);
}
nvlist_t *
zfs_get_clones_nvl(zfs_handle_t *zhp)
{
nvlist_t *nv, *value;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), &nv) != 0) {
struct get_clones_arg gca;
/*
* if this is a snapshot, then the kernel wasn't able
* to get the clones. Do it by slowly iterating.
*/
if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT)
return (NULL);
if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0)
return (NULL);
if (nvlist_alloc(&value, NV_UNIQUE_NAME, 0) != 0) {
nvlist_free(nv);
return (NULL);
}
gca.numclones = zfs_prop_get_int(zhp, ZFS_PROP_NUMCLONES);
gca.value = value;
gca.origin = zhp->zfs_name;
if (gca.numclones != 0) {
zfs_handle_t *root;
char pool[ZFS_MAX_DATASET_NAME_LEN];
char *cp = pool;
/* get the pool name */
(void) strlcpy(pool, zhp->zfs_name, sizeof (pool));
(void) strsep(&cp, "/@");
root = zfs_open(zhp->zfs_hdl, pool,
ZFS_TYPE_FILESYSTEM);
if (root == NULL) {
nvlist_free(nv);
nvlist_free(value);
return (NULL);
}
(void) get_clones_cb(root, &gca);
}
if (gca.numclones != 0 ||
nvlist_add_nvlist(nv, ZPROP_VALUE, value) != 0 ||
nvlist_add_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), nv) != 0) {
nvlist_free(nv);
nvlist_free(value);
return (NULL);
}
nvlist_free(nv);
nvlist_free(value);
verify(0 == nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), &nv));
}
verify(nvlist_lookup_nvlist(nv, ZPROP_VALUE, &value) == 0);
return (value);
}
static int
get_rsnaps_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
{
nvlist_t *value;
uint64_t *snaps;
uint_t nsnaps;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS), &value) != 0)
return (-1);
if (nvlist_lookup_uint64_array(value, ZPROP_VALUE, &snaps,
&nsnaps) != 0)
return (-1);
if (nsnaps == 0) {
/* There are no redaction snapshots; pass a special value back */
(void) snprintf(propbuf, proplen, "none");
return (0);
}
propbuf[0] = '\0';
for (int i = 0; i < nsnaps; i++) {
char buf[128];
if (propbuf[0] != '\0')
(void) strlcat(propbuf, ",", proplen);
(void) snprintf(buf, sizeof (buf), "%llu",
(u_longlong_t)snaps[i]);
(void) strlcat(propbuf, buf, proplen);
}
return (0);
}
/*
* Accepts a property and value and checks that the value
* matches the one found by the channel program. If they are
* not equal, print both of them.
*/
static void
zcp_check(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t intval,
const char *strval)
{
if (!zhp->zfs_hdl->libzfs_prop_debug)
return;
int error;
char *poolname = zhp->zpool_hdl->zpool_name;
const char *prop_name = zfs_prop_to_name(prop);
const char *program =
"args = ...\n"
"ds = args['dataset']\n"
"prop = args['property']\n"
"value, setpoint = zfs.get_prop(ds, prop)\n"
"return {value=value, setpoint=setpoint}\n";
nvlist_t *outnvl;
nvlist_t *retnvl;
nvlist_t *argnvl = fnvlist_alloc();
fnvlist_add_string(argnvl, "dataset", zhp->zfs_name);
fnvlist_add_string(argnvl, "property", zfs_prop_to_name(prop));
error = lzc_channel_program_nosync(poolname, program,
10 * 1000 * 1000, 10 * 1024 * 1024, argnvl, &outnvl);
if (error == 0) {
retnvl = fnvlist_lookup_nvlist(outnvl, "return");
if (zfs_prop_get_type(prop) == PROP_TYPE_NUMBER) {
int64_t ans;
error = nvlist_lookup_int64(retnvl, "value", &ans);
if (error != 0) {
(void) fprintf(stderr, "%s: zcp check error: "
"%u\n", prop_name, error);
return;
}
if (ans != intval) {
(void) fprintf(stderr, "%s: zfs found %llu, "
"but zcp found %llu\n", prop_name,
(u_longlong_t)intval, (u_longlong_t)ans);
}
} else {
char *str_ans;
error = nvlist_lookup_string(retnvl, "value", &str_ans);
if (error != 0) {
(void) fprintf(stderr, "%s: zcp check error: "
"%u\n", prop_name, error);
return;
}
if (strcmp(strval, str_ans) != 0) {
(void) fprintf(stderr,
"%s: zfs found '%s', but zcp found '%s'\n",
prop_name, strval, str_ans);
}
}
} else {
(void) fprintf(stderr, "%s: zcp check failed, channel program "
"error: %u\n", prop_name, error);
}
nvlist_free(argnvl);
nvlist_free(outnvl);
}
/*
* Retrieve a property from the given object. If 'literal' is specified, then
* numbers are left as exact values. Otherwise, numbers are converted to a
* human-readable form.
*
* Returns 0 on success, or -1 on error.
*/
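/*
 * Eg (illustrative only; 'zhp' is assumed to be an open dataset handle and
 * 'mntpt' is a name chosen for the example):
 *
 *	char mntpt[ZFS_MAXPROPLEN];
 *	if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mntpt, sizeof (mntpt),
 *	    NULL, NULL, 0, B_FALSE) == 0)
 *		(void) printf("%s\n", mntpt);
 */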
int
zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal)
{
char *source = NULL;
uint64_t val;
const char *str;
const char *strval;
boolean_t received = zfs_is_recvd_props_mode(zhp);
/*
* Check to see if this property applies to our object
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
return (-1);
if (received && zfs_prop_readonly(prop))
return (-1);
if (src)
*src = ZPROP_SRC_NONE;
switch (prop) {
case ZFS_PROP_CREATION:
/*
* 'creation' is a time_t stored in the statistics. We convert
* this into a string unless 'literal' is specified.
*/
{
val = getprop_uint64(zhp, prop, &source);
time_t time = (time_t)val;
struct tm t;
if (literal ||
localtime_r(&time, &t) == NULL ||
strftime(propbuf, proplen, "%a %b %e %k:%M %Y",
&t) == 0)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_MOUNTPOINT:
/*
* Getting the precise mountpoint can be tricky.
*
* - for 'none' or 'legacy', return those values.
* - for inherited mountpoints, we want to take everything
* after our ancestor and append it to the inherited value.
*
* If the pool has an alternate root, we want to prepend that
* root to any values we return.
*/
str = getprop_string(zhp, prop, &source);
if (str[0] == '/') {
char buf[MAXPATHLEN];
char *root = buf;
const char *relpath;
/*
* If we inherit the mountpoint, even from a dataset
* with a received value, the source will be the path of
* the dataset we inherit from. If source is
* ZPROP_SOURCE_VAL_RECVD, the received value is not
* inherited.
*/
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
relpath = "";
} else {
relpath = zhp->zfs_name + strlen(source);
if (relpath[0] == '/')
relpath++;
}
if ((zpool_get_prop(zhp->zpool_hdl,
ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL,
B_FALSE)) || (strcmp(root, "-") == 0))
root[0] = '\0';
/*
* Special case an alternate root of '/'. This will
* avoid having multiple leading slashes in the
* mountpoint path.
*/
if (strcmp(root, "/") == 0)
root++;
/*
* If the mountpoint is '/' then skip over this
* if we are obtaining either an alternate root or
* an inherited mountpoint.
*/
if (str[1] == '\0' && (root[0] != '\0' ||
relpath[0] != '\0'))
str++;
if (relpath[0] == '\0')
(void) snprintf(propbuf, proplen, "%s%s",
root, str);
else
(void) snprintf(propbuf, proplen, "%s%s%s%s",
root, str, relpath[0] == '@' ? "" : "/",
relpath);
} else {
/* 'legacy' or 'none' */
(void) strlcpy(propbuf, str, proplen);
}
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_ORIGIN:
str = getprop_string(zhp, prop, &source);
if (str == NULL)
return (-1);
(void) strlcpy(propbuf, str, proplen);
zcp_check(zhp, prop, 0, str);
break;
case ZFS_PROP_REDACT_SNAPS:
if (get_rsnaps_string(zhp, propbuf, proplen) != 0)
return (-1);
break;
case ZFS_PROP_CLONES:
if (get_clones_string(zhp, propbuf, proplen) != 0)
return (-1);
break;
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
/*
* If quota or reservation is 0, we translate this into 'none'
* (unless literal is set), and indicate that it's the default
* value. Otherwise, we print the number nicely and indicate
* that it's set locally.
*/
if (val == 0) {
if (literal)
(void) strlcpy(propbuf, "0", proplen);
else
(void) strlcpy(propbuf, "none", proplen);
} else {
if (literal)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
else
zfs_nicebytes(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
case ZFS_PROP_FILESYSTEM_COUNT:
case ZFS_PROP_SNAPSHOT_COUNT:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
/*
* If limit is UINT64_MAX, we translate this into 'none' (unless
* literal is set), and indicate that it's the default value.
* Otherwise, we print the number nicely and indicate that it's
* set locally.
*/
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else if (val == UINT64_MAX) {
(void) strlcpy(propbuf, "none", proplen);
} else {
zfs_nicenum(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_REFRATIO:
case ZFS_PROP_COMPRESSRATIO:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
if (literal)
(void) snprintf(propbuf, proplen, "%llu.%02llu",
(u_longlong_t)(val / 100),
(u_longlong_t)(val % 100));
else
(void) snprintf(propbuf, proplen, "%llu.%02llux",
(u_longlong_t)(val / 100),
(u_longlong_t)(val % 100));
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_TYPE:
switch (zhp->zfs_type) {
case ZFS_TYPE_FILESYSTEM:
str = "filesystem";
break;
case ZFS_TYPE_VOLUME:
str = "volume";
break;
case ZFS_TYPE_SNAPSHOT:
str = "snapshot";
break;
case ZFS_TYPE_BOOKMARK:
str = "bookmark";
break;
default:
abort();
}
(void) snprintf(propbuf, proplen, "%s", str);
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_MOUNTED:
/*
* The 'mounted' property is a pseudo-property that describes
* whether the filesystem is currently mounted. Even though
* it's a boolean value, the typical values of "on" and "off"
* don't make sense, so we translate to "yes" and "no".
*/
if (get_numeric_property(zhp, ZFS_PROP_MOUNTED,
src, &source, &val) != 0)
return (-1);
if (val)
(void) strlcpy(propbuf, "yes", proplen);
else
(void) strlcpy(propbuf, "no", proplen);
break;
case ZFS_PROP_NAME:
/*
* The 'name' property is a pseudo-property derived from the
* dataset name. It is presented as a real property to simplify
* consumers.
*/
(void) strlcpy(propbuf, zhp->zfs_name, proplen);
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_MLSLABEL:
{
#ifdef HAVE_MLSLABEL
m_label_t *new_sl = NULL;
char *ascii = NULL; /* human readable label */
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
if (literal || (strcasecmp(propbuf,
ZFS_MLSLABEL_DEFAULT) == 0))
break;
/*
* Try to translate the internal hex string to
* human-readable output. If there are any
* problems just use the hex string.
*/
if (str_to_label(propbuf, &new_sl, MAC_LABEL,
L_NO_CORRECTION, NULL) == -1) {
m_label_free(new_sl);
break;
}
if (label_to_str(new_sl, &ascii, M_LABEL,
DEF_NAMES) != 0) {
if (ascii)
free(ascii);
m_label_free(new_sl);
break;
}
m_label_free(new_sl);
(void) strlcpy(propbuf, ascii, proplen);
free(ascii);
#else
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
#endif /* HAVE_MLSLABEL */
}
break;
case ZFS_PROP_GUID:
case ZFS_PROP_CREATETXG:
case ZFS_PROP_OBJSETID:
case ZFS_PROP_PBKDF2_ITERS:
/*
* These properties are stored as numbers, but they are
* identifiers or counters.
* We don't want them to be pretty printed, because pretty
* printing truncates their values, making them useless.
*/
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
(void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val);
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_REFERENCED:
case ZFS_PROP_AVAILABLE:
case ZFS_PROP_USED:
case ZFS_PROP_USEDSNAP:
case ZFS_PROP_USEDDS:
case ZFS_PROP_USEDREFRESERV:
case ZFS_PROP_USEDCHILD:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else {
zfs_nicebytes(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
default:
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
if (get_numeric_property(zhp, prop, src,
&source, &val) != 0) {
return (-1);
}
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else {
zfs_nicenum(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case PROP_TYPE_STRING:
str = getprop_string(zhp, prop, &source);
if (str == NULL)
return (-1);
(void) strlcpy(propbuf, str, proplen);
zcp_check(zhp, prop, 0, str);
break;
case PROP_TYPE_INDEX:
if (get_numeric_property(zhp, prop, src,
&source, &val) != 0)
return (-1);
if (zfs_prop_index_to_string(prop, val, &strval) != 0)
return (-1);
(void) strlcpy(propbuf, strval, proplen);
zcp_check(zhp, prop, 0, strval);
break;
default:
abort();
}
}
get_source(zhp, src, source, statbuf, statlen);
return (0);
}
/*
* Utility function to get the given numeric property. Does no validation that
* the given property is the appropriate type; should only be used with
* hard-coded property types.
*/
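/*
 * Eg (illustrative): uint64_t used = zfs_prop_get_int(zhp, ZFS_PROP_USED);
 */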
uint64_t
zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
{
char *source;
uint64_t val = 0;
(void) get_numeric_property(zhp, prop, NULL, &source, &val);
return (val);
}
static int
zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val)
{
char buf[64];
(void) snprintf(buf, sizeof (buf), "%llu", (longlong_t)val);
return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf));
}
/*
* Similar to zfs_prop_get(), but returns the value as an integer.
*/
int
zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value,
zprop_source_t *src, char *statbuf, size_t statlen)
{
char *source;
/*
* Check to see if this property applies to our object
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE)) {
return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE,
dgettext(TEXT_DOMAIN, "cannot get property '%s'"),
zfs_prop_to_name(prop)));
}
if (src)
*src = ZPROP_SRC_NONE;
if (get_numeric_property(zhp, prop, src, &source, value) != 0)
return (-1);
get_source(zhp, src, source, statbuf, statlen);
return (0);
}
#ifdef HAVE_IDMAP
static int
idmap_id_to_numeric_domain_rid(uid_t id, boolean_t isuser,
char **domainp, idmap_rid_t *ridp)
{
idmap_get_handle_t *get_hdl = NULL;
idmap_stat status;
int err = EINVAL;
if (idmap_get_create(&get_hdl) != IDMAP_SUCCESS)
goto out;
if (isuser) {
err = idmap_get_sidbyuid(get_hdl, id,
IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
} else {
err = idmap_get_sidbygid(get_hdl, id,
IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
}
if (err == IDMAP_SUCCESS &&
idmap_get_mappings(get_hdl) == IDMAP_SUCCESS &&
status == IDMAP_SUCCESS)
err = 0;
else
err = EINVAL;
out:
if (get_hdl)
idmap_get_destroy(get_hdl);
return (err);
}
#endif /* HAVE_IDMAP */
/*
* Convert the propname into the parameters needed by the kernel.
* Eg: userquota@ahrens -> ZFS_PROP_USERQUOTA, "", 126829
* Eg: userused@matt@domain -> ZFS_PROP_USERUSED, "S-1-123-456", 789
* Eg: groupquota@staff -> ZFS_PROP_GROUPQUOTA, "", 1234
* Eg: groupused@staff -> ZFS_PROP_GROUPUSED, "", 1234
* Eg: projectquota@123 -> ZFS_PROP_PROJECTQUOTA, "", 123
* Eg: projectused@789 -> ZFS_PROP_PROJECTUSED, "", 789
*/
static int
userquota_propname_decode(const char *propname, boolean_t zoned,
zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp)
{
zfs_userquota_prop_t type;
char *cp;
boolean_t isuser;
boolean_t isgroup;
boolean_t isproject;
struct passwd *pw;
struct group *gr;
domain[0] = '\0';
/* Figure out the property type ({user|group|project}{quota|used|objquota|objused}) */
for (type = 0; type < ZFS_NUM_USERQUOTA_PROPS; type++) {
if (strncmp(propname, zfs_userquota_prop_prefixes[type],
strlen(zfs_userquota_prop_prefixes[type])) == 0)
break;
}
if (type == ZFS_NUM_USERQUOTA_PROPS)
return (EINVAL);
*typep = type;
isuser = (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_USERUSED ||
type == ZFS_PROP_USEROBJQUOTA ||
type == ZFS_PROP_USEROBJUSED);
isgroup = (type == ZFS_PROP_GROUPQUOTA || type == ZFS_PROP_GROUPUSED ||
type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_GROUPOBJUSED);
isproject = (type == ZFS_PROP_PROJECTQUOTA ||
type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTOBJQUOTA ||
type == ZFS_PROP_PROJECTOBJUSED);
cp = strchr(propname, '@') + 1;
if (isuser && (pw = getpwnam(cp)) != NULL) {
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
*ridp = pw->pw_uid;
} else if (isgroup && (gr = getgrnam(cp)) != NULL) {
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
*ridp = gr->gr_gid;
} else if (!isproject && strchr(cp, '@')) {
#ifdef HAVE_IDMAP
/*
* It's a SID name (eg "user@domain") that needs to be
* turned into S-1-domainID-RID.
*/
directory_error_t e;
char *numericsid = NULL;
char *end;
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
if (isuser) {
e = directory_sid_from_user_name(NULL,
cp, &numericsid);
} else {
e = directory_sid_from_group_name(NULL,
cp, &numericsid);
}
if (e != NULL) {
directory_error_free(e);
return (ENOENT);
}
if (numericsid == NULL)
return (ENOENT);
cp = numericsid;
(void) strlcpy(domain, cp, domainlen);
cp = strrchr(domain, '-');
*cp = '\0';
cp++;
errno = 0;
*ridp = strtoull(cp, &end, 10);
free(numericsid);
if (errno != 0 || *end != '\0')
return (EINVAL);
#else
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
/* It's a user/group/project ID (eg "12345"). */
uid_t id;
char *end;
id = strtoul(cp, &end, 10);
if (*end != '\0')
return (EINVAL);
if (id > MAXUID && !isproject) {
#ifdef HAVE_IDMAP
/* It's an ephemeral ID. */
idmap_rid_t rid;
char *mapdomain;
if (idmap_id_to_numeric_domain_rid(id, isuser,
&mapdomain, &rid) != 0)
return (ENOENT);
(void) strlcpy(domain, mapdomain, domainlen);
*ridp = rid;
#else
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
*ridp = id;
}
}
return (0);
}
static int
zfs_prop_get_userquota_common(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue, zfs_userquota_prop_t *typep)
{
int err;
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
err = userquota_propname_decode(propname,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED),
typep, zc.zc_value, sizeof (zc.zc_value), &zc.zc_guid);
zc.zc_objset_type = *typep;
if (err)
return (err);
err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_USERSPACE_ONE, &zc);
if (err)
return (err);
*propvalue = zc.zc_cookie;
return (0);
}
int
zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue)
{
zfs_userquota_prop_t type;
return (zfs_prop_get_userquota_common(zhp, propname, propvalue,
&type));
}
int
zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal)
{
int err;
uint64_t propvalue;
zfs_userquota_prop_t type;
err = zfs_prop_get_userquota_common(zhp, propname, &propvalue,
&type);
if (err)
return (err);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)propvalue);
} else if (propvalue == 0 &&
(type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA ||
type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_PROJECTQUOTA ||
type == ZFS_PROP_PROJECTOBJQUOTA)) {
(void) strlcpy(propbuf, "none", proplen);
} else if (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA ||
type == ZFS_PROP_USERUSED || type == ZFS_PROP_GROUPUSED ||
type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTQUOTA) {
zfs_nicebytes(propvalue, propbuf, proplen);
} else {
zfs_nicenum(propvalue, propbuf, proplen);
}
return (0);
}
/*
* propname must start with "written@" or "written#".
*/
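/*
 * Eg (illustrative names): for a handle on "pool/fs", the short form
 * "written@monday" resolves to the snapshot "pool/fs@monday", while
 * "written@pool/fs@monday" already names the snapshot in full.
 */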
int
zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue)
{
int err;
zfs_cmd_t zc = {"\0"};
const char *snapname;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
assert(zfs_prop_written(propname));
snapname = propname + strlen("written@");
if (strchr(snapname, '@') != NULL || strchr(snapname, '#') != NULL) {
/* full snapshot or bookmark name specified */
(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
} else {
/* snapname is the short name, append it to zhp's fsname */
char *cp;
(void) strlcpy(zc.zc_value, zhp->zfs_name,
sizeof (zc.zc_value));
cp = strchr(zc.zc_value, '@');
if (cp != NULL)
*cp = '\0';
(void) strlcat(zc.zc_value, snapname - 1, sizeof (zc.zc_value));
}
err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SPACE_WRITTEN, &zc);
if (err)
return (err);
*propvalue = zc.zc_cookie;
return (0);
}
int
zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal)
{
int err;
uint64_t propvalue;
err = zfs_prop_get_written_int(zhp, propname, &propvalue);
if (err)
return (err);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)propvalue);
} else {
zfs_nicebytes(propvalue, propbuf, proplen);
}
return (0);
}
/*
* Returns the name of the given zfs handle.
*/
const char *
zfs_get_name(const zfs_handle_t *zhp)
{
return (zhp->zfs_name);
}
/*
* Returns the name of the parent pool for the given zfs handle.
*/
const char *
zfs_get_pool_name(const zfs_handle_t *zhp)
{
return (zhp->zpool_hdl->zpool_name);
}
/*
* Returns the type of the given zfs handle.
*/
zfs_type_t
zfs_get_type(const zfs_handle_t *zhp)
{
return (zhp->zfs_type);
}
/*
* Returns the type of the given zfs handle,
* or, if a snapshot, the type of the snapshotted dataset.
*/
zfs_type_t
zfs_get_underlying_type(const zfs_handle_t *zhp)
{
return (zhp->zfs_head_type);
}
/*
* Is one dataset name a child dataset of another?
*
* Needs to handle these cases:
* Dataset 1	"a/foo"		"a/foo"		"a/foo"		"a/foo"
* Dataset 2	"a/fo"		"a/foobar"	"a/bar/baz"	"a/foo/bar"
* Descendant?	No.		No.		No.		Yes.
*/
static boolean_t
is_descendant(const char *ds1, const char *ds2)
{
size_t d1len = strlen(ds1);
/* ds2 can't be a descendant if it's smaller */
if (strlen(ds2) < d1len)
return (B_FALSE);
/* otherwise, compare strings and verify that there's a '/' char */
return (ds2[d1len] == '/' && (strncmp(ds1, ds2, d1len) == 0));
}
/*
* Given a complete name, return just the portion that refers to the parent.
* Will return -1 if there is no parent (path is just the name of the
* pool).
*/
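/*
 * Eg: parent_name("tank/home/user", ...) yields "tank/home", while
 * parent_name("tank", ...) returns -1. (Names are illustrative.)
 */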
static int
parent_name(const char *path, char *buf, size_t buflen)
{
char *slashp;
(void) strlcpy(buf, path, buflen);
if ((slashp = strrchr(buf, '/')) == NULL)
return (-1);
*slashp = '\0';
return (0);
}
int
zfs_parent_name(zfs_handle_t *zhp, char *buf, size_t buflen)
{
return (parent_name(zfs_get_name(zhp), buf, buflen));
}
/*
* If accept_ancestor is false, then check to make sure that the given path has
* a parent, and that it exists. If accept_ancestor is true, then find the
* closest existing ancestor for the given path. Return, in prefixlen, the
* length of the already-existing prefix of the given path. We also fetch the
* 'zoned' property, which is used to validate property settings when creating
* new datasets.
*/
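/*
 * Eg (illustrative): checking "tank/a/b/c" with accept_ancestor set, when
 * only "tank/a" exists, succeeds and returns prefixlen = strlen("tank/a").
 */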
static int
check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned,
boolean_t accept_ancestor, int *prefixlen)
{
zfs_cmd_t zc = {"\0"};
char parent[ZFS_MAX_DATASET_NAME_LEN];
char *slash;
zfs_handle_t *zhp;
char errbuf[1024];
uint64_t is_zoned;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot create '%s'"), path);
/* get parent, and check to see if this is just a pool */
if (parent_name(path, parent, sizeof (parent)) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing dataset name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
/* check to see if the pool exists */
if ((slash = strchr(parent, '/')) == NULL)
slash = parent + strlen(parent);
(void) strncpy(zc.zc_name, parent, slash - parent);
zc.zc_name[slash - parent] = '\0';
if (zfs_ioctl(hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0 &&
errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such pool '%s'"), zc.zc_name);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* check to see if the parent dataset exists */
while ((zhp = make_dataset_handle(hdl, parent)) == NULL) {
if (errno == ENOENT && accept_ancestor) {
/*
* Go deeper to find an ancestor; give up at the top level.
*/
if (parent_name(parent, parent, sizeof (parent)) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such pool '%s'"), zc.zc_name);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
} else if (errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent does not exist"));
return (zfs_error(hdl, EZFS_NOENT, errbuf));
} else
return (zfs_standard_error(hdl, errno, errbuf));
}
is_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
if (zoned != NULL)
*zoned = is_zoned;
/* we are in a non-global zone, but parent is in the global zone */
if (getzoneid() != GLOBAL_ZONEID && !is_zoned) {
(void) zfs_standard_error(hdl, EPERM, errbuf);
zfs_close(zhp);
return (-1);
}
/* make sure parent is a filesystem */
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent is not a filesystem"));
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
zfs_close(zhp);
return (-1);
}
zfs_close(zhp);
if (prefixlen != NULL)
*prefixlen = strlen(parent);
return (0);
}
/*
* Determines whether a dataset of the given type(s) exists.
*/
boolean_t
zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types)
{
zfs_handle_t *zhp;
if (!zfs_validate_name(hdl, path, types, B_FALSE))
return (B_FALSE);
/*
* Try to get stats for the dataset, which will tell us if it exists.
*/
if ((zhp = make_dataset_handle(hdl, path)) != NULL) {
int ds_type = zhp->zfs_type;
zfs_close(zhp);
if (types & ds_type)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Given a path to 'target', create all the ancestors between
* the prefixlen portion of the path, and the target itself.
* Fail if the initial prefixlen-ancestor does not already exist.
*/
int
create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
{
zfs_handle_t *h;
char *cp;
const char *opname;
/* make sure prefix exists */
cp = target + prefixlen;
if (*cp != '/') {
assert(strchr(cp, '/') == NULL);
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
} else {
*cp = '\0';
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
*cp = '/';
}
if (h == NULL)
return (-1);
zfs_close(h);
/*
* Attempt to create, mount, and share any ancestor filesystems,
* up to the prefixlen-long one.
*/
for (cp = target + prefixlen + 1;
(cp = strchr(cp, '/')) != NULL; *cp = '/', cp++) {
*cp = '\0';
h = make_dataset_handle(hdl, target);
if (h) {
/* it already exists, nothing to do here */
zfs_close(h);
continue;
}
if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM,
NULL) != 0) {
opname = dgettext(TEXT_DOMAIN, "create");
goto ancestorerr;
}
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
if (h == NULL) {
opname = dgettext(TEXT_DOMAIN, "open");
goto ancestorerr;
}
if (zfs_mount(h, NULL, 0) != 0) {
opname = dgettext(TEXT_DOMAIN, "mount");
goto ancestorerr;
}
if (zfs_share(h) != 0) {
opname = dgettext(TEXT_DOMAIN, "share");
goto ancestorerr;
}
zfs_close(h);
}
zfs_commit_all_shares();
return (0);
ancestorerr:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to %s ancestor '%s'"), opname, target);
return (-1);
}
/*
* Creates non-existing ancestors of the given path.
*/
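/*
 * Eg (illustrative): zfs_create_ancestors(hdl, "tank/a/b/c") creates
 * "tank/a" and "tank/a/b" as needed, but not "tank/a/b/c" itself.
 */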
int
zfs_create_ancestors(libzfs_handle_t *hdl, const char *path)
{
int prefix;
char *path_copy;
char errbuf[1024];
int rc = 0;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), path);
/*
* Check that we are not exceeding the nesting limit
* before we start creating any ancestors.
*/
if (dataset_nestcheck(path) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"maximum name nesting depth exceeded"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
if (check_parents(hdl, path, NULL, B_TRUE, &prefix) != 0)
return (-1);
if ((path_copy = strdup(path)) != NULL) {
rc = create_parents(hdl, path_copy, prefix);
free(path_copy);
}
if (path_copy == NULL || rc != 0)
return (-1);
return (0);
}
/*
* Create a new filesystem or volume.
*/
int
zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
nvlist_t *props)
{
int ret;
uint64_t size = 0;
uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
uint64_t zoned;
enum lzc_dataset_type ost;
zpool_handle_t *zpool_handle;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
char errbuf[1024];
char parent[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), path);
/* validate the path, taking care to note the extended error message */
if (!zfs_validate_name(hdl, path, type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
if (dataset_nestcheck(path) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"maximum name nesting depth exceeded"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
/* validate parents exist */
if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0)
return (-1);
/*
* The failure modes when creating a dataset of a different type over
* one that already exists are a little strange. In particular, if you
* try to create a dataset on top of an existing dataset, the ioctl()
* will return ENOENT, not EEXIST. To prevent this from happening, we
* first try to see if the dataset exists.
*/
if (zfs_dataset_exists(hdl, path, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
}
if (type == ZFS_TYPE_VOLUME)
ost = LZC_DATSET_TYPE_ZVOL;
else
ost = LZC_DATSET_TYPE_ZFS;
/* open zpool handle for prop validation */
char pool_path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(pool_path, path, sizeof (pool_path));
/* truncate pool_path at first slash */
char *p = strchr(pool_path, '/');
if (p != NULL)
*p = '\0';
if ((zpool_handle = zpool_open(hdl, pool_path)) == NULL)
return (-1);
if (props && (props = zfs_valid_proplist(hdl, type, props,
zoned, NULL, zpool_handle, B_TRUE, errbuf)) == 0) {
zpool_close(zpool_handle);
return (-1);
}
zpool_close(zpool_handle);
if (type == ZFS_TYPE_VOLUME) {
/*
* If we are creating a volume, the size and block size must
* satisfy a few constraints. First, the blocksize must be a
* valid block size between SPA_{MIN,MAX}BLOCKSIZE. Second, the
* volsize must be a multiple of the block size, and cannot be
* zero.
*/
if (props == NULL || nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing volume size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if ((ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&blocksize)) != 0) {
if (ret == ENOENT) {
blocksize = zfs_prop_default_numeric(
ZFS_PROP_VOLBLOCKSIZE);
} else {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing volume block size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
}
if (size == 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"volume size cannot be zero"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if (size % blocksize != 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"volume size must be a multiple of volume block "
"size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
}
(void) parent_name(path, parent, sizeof (parent));
if (zfs_crypto_create(hdl, parent, props, NULL, B_TRUE,
&wkeydata, &wkeylen) != 0) {
nvlist_free(props);
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
}
/* create the dataset */
ret = lzc_create(path, ost, props, wkeydata, wkeylen);
nvlist_free(props);
if (wkeydata != NULL)
free(wkeydata);
/* check for failure */
if (ret != 0) {
switch (errno) {
case ENOENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such parent '%s'"), parent);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to set this "
"property or value"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption root's key is not loaded "
"or provided"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ERANGE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property value(s) specified"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
#ifdef _ILP32
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
if (type == ZFS_TYPE_VOLUME)
return (zfs_error(hdl, EZFS_VOLTOOBIG,
errbuf));
#endif
/* FALLTHROUGH */
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
return (0);
}
/*
* Destroys the given dataset. The caller must make sure that the filesystem
* isn't mounted, and that there are no active dependents. If the filesystem
* does not exist, this function does nothing.
*/
int
zfs_destroy(zfs_handle_t *zhp, boolean_t defer)
{
int error;
if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT && defer)
return (EINVAL);
if (zhp->zfs_type == ZFS_TYPE_BOOKMARK) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, zhp->zfs_name);
error = lzc_destroy_bookmarks(nv, NULL);
fnvlist_free(nv);
if (error != 0) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, error,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
}
return (0);
}
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, zhp->zfs_name);
error = lzc_destroy_snaps(nv, defer, NULL);
fnvlist_free(nv);
} else {
error = lzc_destroy(zhp->zfs_name);
}
if (error != 0 && error != ENOENT) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
}
remove_mountpoint(zhp);
return (0);
}
struct destroydata {
nvlist_t *nvl;
const char *snapname;
};
static int
zfs_check_snap_cb(zfs_handle_t *zhp, void *arg)
{
struct destroydata *dd = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
dd->snapname) >= sizeof (name))
return (EINVAL);
if (lzc_exists(name))
verify(nvlist_add_boolean(dd->nvl, name) == 0);
rv = zfs_iter_filesystems(zhp, zfs_check_snap_cb, dd);
zfs_close(zhp);
return (rv);
}
/*
* Destroys all snapshots with the given name in zhp and its descendants.
*/
int
zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname, boolean_t defer)
{
int ret;
struct destroydata dd = { 0 };
dd.snapname = snapname;
verify(nvlist_alloc(&dd.nvl, NV_UNIQUE_NAME, 0) == 0);
(void) zfs_check_snap_cb(zfs_handle_dup(zhp), &dd);
if (nvlist_empty(dd.nvl)) {
ret = zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT,
dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"),
zhp->zfs_name, snapname);
} else {
ret = zfs_destroy_snaps_nvl(zhp->zfs_hdl, dd.nvl, defer);
}
nvlist_free(dd.nvl);
return (ret);
}
/*
* Destroys all the snapshots named in the nvlist.
*/
int
zfs_destroy_snaps_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, boolean_t defer)
{
int ret;
nvlist_t *errlist = NULL;
nvpair_t *pair;
ret = lzc_destroy_snaps(snaps, defer, &errlist);
if (ret == 0) {
nvlist_free(errlist);
return (0);
}
if (nvlist_empty(errlist)) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot destroy snapshots"));
ret = zfs_standard_error(hdl, ret, errbuf);
}
for (pair = nvlist_next_nvpair(errlist, NULL);
pair != NULL; pair = nvlist_next_nvpair(errlist, pair)) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot destroy snapshot %s"),
nvpair_name(pair));
switch (fnvpair_value_int32(pair)) {
case EEXIST:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "snapshot is cloned"));
ret = zfs_error(hdl, EZFS_EXISTS, errbuf);
break;
default:
ret = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
nvlist_free(errlist);
return (ret);
}
/*
* Clones the given dataset. The target must be of the same type as the source.
*/
int
zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
{
char parent[ZFS_MAX_DATASET_NAME_LEN];
int ret;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
uint64_t zoned;
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), target);
/* validate the target/clone name */
if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents exist */
if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0)
return (-1);
(void) parent_name(target, parent, sizeof (parent));
/* do the clone */
if (props) {
zfs_type_t type;
if (ZFS_IS_VOLUME(zhp)) {
type = ZFS_TYPE_VOLUME;
} else {
type = ZFS_TYPE_FILESYSTEM;
}
if ((props = zfs_valid_proplist(hdl, type, props, zoned,
zhp, zhp->zpool_hdl, B_TRUE, errbuf)) == NULL)
return (-1);
if (zfs_fix_auto_resv(zhp, props) == -1) {
nvlist_free(props);
return (-1);
}
}
if (zfs_crypto_clone_check(hdl, zhp, parent, props) != 0) {
nvlist_free(props);
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
}
ret = lzc_clone(target, zhp->zfs_name, props);
nvlist_free(props);
if (ret != 0) {
switch (errno) {
case ENOENT:
/*
* The parent doesn't exist. We should have caught this
* above, but there may be a race condition that has since
* destroyed the parent.
*
* At this point, we don't know whether it's the source
* that doesn't exist anymore, or whether the target
* dataset doesn't exist.
*/
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"no such parent '%s'"), parent);
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
case EXDEV:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"source and target pools differ"));
return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET,
errbuf));
default:
return (zfs_standard_error(zhp->zfs_hdl, errno,
errbuf));
}
}
return (ret);
}
/*
* Promotes the given clone fs to be the clone parent.
*/
int
zfs_promote(zfs_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int ret;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot promote '%s'"), zhp->zfs_name);
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshots can not be promoted"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (zhp->zfs_dmustats.dds_origin[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a cloned filesystem"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
ret = lzc_promote(zhp->zfs_name, snapname, sizeof (snapname));
if (ret != 0) {
switch (ret) {
case EACCES:
/*
* Promoting an encrypted dataset outside its
* encryption root.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot promote dataset outside its "
"encryption root"));
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
case EEXIST:
/* There is a conflicting snapshot name. */
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"conflicting snapshot '%s' from parent '%s'"),
snapname, zhp->zfs_dmustats.dds_origin);
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
default:
return (zfs_standard_error(hdl, ret, errbuf));
}
}
return (ret);
}
typedef struct snapdata {
nvlist_t *sd_nvl;
const char *sd_snapname;
} snapdata_t;
static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
snapdata_t *sd = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) == 0) {
if (snprintf(name, sizeof (name), "%s@%s", zfs_get_name(zhp),
sd->sd_snapname) >= sizeof (name))
return (EINVAL);
fnvlist_add_boolean(sd->sd_nvl, name);
rv = zfs_iter_filesystems(zhp, zfs_snapshot_cb, sd);
}
zfs_close(zhp);
return (rv);
}
/*
* Creates snapshots. The keys in the snaps nvlist are the snapshots to be
* created.
*/
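/*
 * Illustrative sketch (the dataset name is an assumption for the example):
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	fnvlist_add_boolean(snaps, "tank/fs@backup");
 *	error = zfs_snapshot_nvl(hdl, snaps, NULL);
 *	fnvlist_free(snaps);
 */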
int
zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, nvlist_t *props)
{
int ret;
char errbuf[1024];
nvpair_t *elem;
nvlist_t *errors;
zpool_handle_t *zpool_hdl;
char pool[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create snapshots "));
elem = NULL;
while ((elem = nvlist_next_nvpair(snaps, elem)) != NULL) {
const char *snapname = nvpair_name(elem);
/* validate the target name */
if (!zfs_validate_name(hdl, snapname, ZFS_TYPE_SNAPSHOT,
B_TRUE)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s'"), snapname);
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
}
/*
* Get the pool handle for property validation. This assumes all snapshots
* are in the same pool, as does lzc_snapshot() (below).
*/
elem = nvlist_next_nvpair(snaps, NULL);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/@")] = '\0';
zpool_hdl = zpool_open(hdl, pool);
if (zpool_hdl == NULL)
return (-1);
if (props != NULL &&
(props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT,
props, B_FALSE, NULL, zpool_hdl, B_FALSE, errbuf)) == NULL) {
zpool_close(zpool_hdl);
return (-1);
}
zpool_close(zpool_hdl);
ret = lzc_snapshot(snaps, props, &errors);
if (ret != 0) {
boolean_t printed = B_FALSE;
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s'"), nvpair_name(elem));
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
printed = B_TRUE;
}
if (!printed) {
switch (ret) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple snapshots of same "
"fs not allowed"));
(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
break;
default:
(void) zfs_standard_error(hdl, ret, errbuf);
}
}
}
nvlist_free(props);
nvlist_free(errors);
return (ret);
}
int
zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive,
nvlist_t *props)
{
int ret;
snapdata_t sd = { 0 };
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *cp;
zfs_handle_t *zhp;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot snapshot %s"), path);
if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
(void) strlcpy(fsname, path, sizeof (fsname));
cp = strchr(fsname, '@');
*cp = '\0';
sd.sd_snapname = cp + 1;
if ((zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
return (-1);
}
verify(nvlist_alloc(&sd.sd_nvl, NV_UNIQUE_NAME, 0) == 0);
if (recursive) {
(void) zfs_snapshot_cb(zfs_handle_dup(zhp), &sd);
} else {
fnvlist_add_boolean(sd.sd_nvl, path);
}
ret = zfs_snapshot_nvl(hdl, sd.sd_nvl, props);
nvlist_free(sd.sd_nvl);
zfs_close(zhp);
return (ret);
}
/*
* Destroy any snapshots and bookmarks more recent than the rollback target.
* rollback_destroy() checks the creation transaction group against the
* target's; rollback_destroy_dependent() is invoked on the dependents
* (clones) first and destroys them unconditionally.
*/
typedef struct rollback_data {
const char *cb_target; /* the snapshot */
uint64_t cb_create; /* creation time reference */
boolean_t cb_error;
boolean_t cb_force;
} rollback_data_t;
static int
rollback_destroy_dependent(zfs_handle_t *zhp, void *data)
{
rollback_data_t *cbp = data;
prop_changelist_t *clp;
/* We must destroy this clone; first unmount it */
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
cbp->cb_force ? MS_FORCE: 0);
if (clp == NULL || changelist_prefix(clp) != 0) {
cbp->cb_error = B_TRUE;
zfs_close(zhp);
return (0);
}
if (zfs_destroy(zhp, B_FALSE) != 0)
cbp->cb_error = B_TRUE;
else
changelist_remove(clp, zhp->zfs_name);
(void) changelist_postfix(clp);
changelist_free(clp);
zfs_close(zhp);
return (0);
}
static int
rollback_destroy(zfs_handle_t *zhp, void *data)
{
rollback_data_t *cbp = data;
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
cbp->cb_error |= zfs_iter_dependents(zhp, B_FALSE,
rollback_destroy_dependent, cbp);
cbp->cb_error |= zfs_destroy(zhp, B_FALSE);
}
zfs_close(zhp);
return (0);
}
/*
* Given a dataset, roll back to a specific snapshot, discarding any
* data changes since then and making it the active dataset.
*
* Any snapshots and bookmarks more recent than the target are
* destroyed, along with their dependents (i.e. clones).
*/
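/*
 * Eg (illustrative): with 'fs' open on "tank/fs" and 'snap' open on
 * "tank/fs@before", zfs_rollback(fs, snap, B_FALSE) discards everything
 * written to "tank/fs" after that snapshot was taken.
 */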
int
zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force)
{
rollback_data_t cb = { 0 };
int err;
boolean_t restore_resv = 0;
uint64_t old_volsize = 0, new_volsize;
zfs_prop_t resv_prop = { 0 };
uint64_t min_txg = 0;
assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
zhp->zfs_type == ZFS_TYPE_VOLUME);
/*
* Destroy all recent snapshots and their dependents.
*/
cb.cb_force = force;
cb.cb_target = snap->zfs_name;
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
if (cb.cb_create > 0)
min_txg = cb.cb_create;
(void) zfs_iter_snapshots(zhp, B_FALSE, rollback_destroy, &cb,
min_txg, 0);
(void) zfs_iter_bookmarks(zhp, rollback_destroy, &cb);
if (cb.cb_error)
return (-1);
/*
* Now that we have verified that the snapshot is the latest,
* roll back to the given snapshot.
*/
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
return (-1);
old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
restore_resv =
(old_volsize == zfs_prop_get_int(zhp, resv_prop));
}
/*
* Pass both the filesystem and the desired snapshot names;
* we will get an error back if the snapshot is destroyed or
* a new snapshot is created before this request is processed.
*/
err = lzc_rollback_to(zhp->zfs_name, snap->zfs_name);
if (err != 0) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot rollback '%s'"),
zhp->zfs_name);
switch (err) {
case EEXIST:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"there is a snapshot or bookmark more recent "
"than '%s'"), snap->zfs_name);
(void) zfs_error(zhp->zfs_hdl, EZFS_EXISTS, errbuf);
break;
case ESRCH:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is not found among snapshots of '%s'"),
snap->zfs_name, zhp->zfs_name);
(void) zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf);
break;
case EINVAL:
(void) zfs_error(zhp->zfs_hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(zhp->zfs_hdl, err, errbuf);
}
return (err);
}
/*
* For volumes, if the pre-rollback volsize matched the pre-
* rollback reservation and the volsize has changed then set
* the reservation property to the post-rollback volsize.
* Make a new handle since the rollback closed the dataset.
*/
if ((zhp->zfs_type == ZFS_TYPE_VOLUME) &&
(zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) {
if (restore_resv) {
new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
if (old_volsize != new_volsize)
err = zfs_prop_set_int(zhp, resv_prop,
new_volsize);
}
zfs_close(zhp);
}
return (err);
}
/*
* Renames the given dataset.
*/
int
zfs_rename(zfs_handle_t *zhp, const char *target, renameflags_t flags)
{
int ret = 0;
zfs_cmd_t zc = {"\0"};
char *delim;
prop_changelist_t *cl = NULL;
char parent[ZFS_MAX_DATASET_NAME_LEN];
char property[ZFS_MAXPROPLEN];
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
/* if we have the same exact name, just return success */
if (strcmp(zhp->zfs_name, target) == 0)
return (0);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename to '%s'"), target);
/* make sure source name is valid */
if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/*
* Make sure the target name is valid
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
if ((strchr(target, '@') == NULL) ||
*target == '@') {
/*
* Snapshot target name is abbreviated;
* reconstruct the full dataset name.
*/
(void) strlcpy(parent, zhp->zfs_name,
sizeof (parent));
delim = strchr(parent, '@');
if (strchr(target, '@') == NULL)
*(++delim) = '\0';
else
*delim = '\0';
(void) strlcat(parent, target, sizeof (parent));
target = parent;
} else {
/*
* Make sure we're renaming within the same dataset.
*/
delim = strchr(target, '@');
if (strncmp(zhp->zfs_name, target, delim - target)
!= 0 || zhp->zfs_name[delim - target] != '@') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshots must be part of same "
"dataset"));
return (zfs_error(hdl, EZFS_CROSSTARGET,
errbuf));
}
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
} else {
if (flags.recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"recursive rename must be a snapshot"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents */
if (check_parents(hdl, target, NULL, B_FALSE, NULL) != 0)
return (-1);
/* make sure we're in the same pool */
verify((delim = strchr(target, '/')) != NULL);
if (strncmp(zhp->zfs_name, target, delim - target) != 0 ||
zhp->zfs_name[delim - target] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"datasets must be within same pool"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
}
/* new name cannot be a child of the current dataset name */
if (is_descendant(zhp->zfs_name, target)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"New dataset name cannot be a descendant of "
"current dataset name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
}
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name);
if (getzoneid() == GLOBAL_ZONEID &&
zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is used in a non-global zone"));
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
/*
* Avoid unmounting file systems with mountpoint property set to
* 'legacy' or 'none' even if the -u option is not given.
*/
if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
!flags.recursive && !flags.nounmount &&
zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, property,
sizeof (property), NULL, NULL, 0, B_FALSE) == 0 &&
(strcmp(property, "legacy") == 0 ||
strcmp(property, "none") == 0)) {
flags.nounmount = B_TRUE;
}
if (flags.recursive) {
char *parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name);
if (parentname == NULL) {
ret = -1;
goto error;
}
delim = strchr(parentname, '@');
*delim = '\0';
zfs_handle_t *zhrp = zfs_open(zhp->zfs_hdl, parentname,
ZFS_TYPE_DATASET);
free(parentname);
if (zhrp == NULL) {
ret = -1;
goto error;
}
zfs_close(zhrp);
} else if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT) {
if ((cl = changelist_gather(zhp, ZFS_PROP_NAME,
flags.nounmount ? CL_GATHER_DONT_UNMOUNT :
CL_GATHER_ITER_MOUNTED,
flags.forceunmount ? MS_FORCE : 0)) == NULL)
return (-1);
if (changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
ret = -1;
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
}
if (ZFS_IS_VOLUME(zhp))
zc.zc_objset_type = DMU_OST_ZVOL;
else
zc.zc_objset_type = DMU_OST_ZFS;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));
zc.zc_cookie = !!flags.recursive;
zc.zc_cookie |= (!!flags.nounmount) << 1;
if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) {
/*
* If the rename was recursive, the name that actually failed
* will be in zc.zc_name.
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename '%s'"), zc.zc_name);
if (flags.recursive && errno == EEXIST) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"a child dataset already has a snapshot "
"with the new name"));
(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
} else if (errno == EACCES) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot move encrypted child outside of "
"its encryption root"));
(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
} else {
(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
}
/*
* On failure, we still want to remount any filesystems that
* were previously mounted, so we don't alter the system state.
*/
if (cl != NULL)
(void) changelist_postfix(cl);
} else {
if (cl != NULL) {
changelist_rename(cl, zfs_get_name(zhp), target);
ret = changelist_postfix(cl);
}
}
error:
if (cl != NULL) {
changelist_free(cl);
}
return (ret);
}
nvlist_t *
zfs_get_all_props(zfs_handle_t *zhp)
{
return (zhp->zfs_props);
}
nvlist_t *
zfs_get_recvd_props(zfs_handle_t *zhp)
{
if (zhp->zfs_recvd_props == NULL)
if (get_recvd_props_ioctl(zhp) != 0)
return (NULL);
return (zhp->zfs_recvd_props);
}
nvlist_t *
zfs_get_user_props(zfs_handle_t *zhp)
{
return (zhp->zfs_user_props);
}
/*
* This function is used by 'zfs list' to determine the exact set of columns to
* display, and their maximum widths. This does two main things:
*
* - If this is a list of all properties, then expand the list to include
* all native properties, and set a flag so that for each dataset we look
* for new unique user properties and add them to the list.
*
* - For non fixed-width properties, keep track of the maximum width seen
* so that we can size the column appropriately. If the user has
* requested received property values, we also need to compute the width
* of the RECEIVED column.
*/
int
zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp, boolean_t received,
boolean_t literal)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zprop_list_t *entry;
zprop_list_t **last, **start;
nvlist_t *userprops, *propval;
nvpair_t *elem;
char *strval;
char buf[ZFS_MAXPROPLEN];
if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0)
return (-1);
userprops = zfs_get_user_props(zhp);
entry = *plp;
if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) {
/*
* Go through and add any user properties as necessary. We
* start by incrementing our list pointer to the first
* non-native property.
*/
start = plp;
while (*start != NULL) {
if ((*start)->pl_prop == ZPROP_INVAL)
break;
start = &(*start)->pl_next;
}
elem = NULL;
while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) {
/*
* See if we've already found this property in our list.
*/
for (last = start; *last != NULL;
last = &(*last)->pl_next) {
if (strcmp((*last)->pl_user_prop,
nvpair_name(elem)) == 0)
break;
}
if (*last == NULL) {
if ((entry = zfs_alloc(hdl,
sizeof (zprop_list_t))) == NULL ||
((entry->pl_user_prop = zfs_strdup(hdl,
nvpair_name(elem)))) == NULL) {
free(entry);
return (-1);
}
entry->pl_prop = ZPROP_INVAL;
entry->pl_width = strlen(nvpair_name(elem));
entry->pl_all = B_TRUE;
*last = entry;
}
}
}
/*
* Now go through and check the width of any non-fixed columns
*/
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed && !literal)
continue;
if (entry->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, entry->pl_prop,
buf, sizeof (buf), NULL, NULL, 0, literal) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
if (received && zfs_prop_get_recvd(zhp,
zfs_prop_to_name(entry->pl_prop),
buf, sizeof (buf), literal) == 0)
if (strlen(buf) > entry->pl_recvd_width)
entry->pl_recvd_width = strlen(buf);
} else {
if (nvlist_lookup_nvlist(userprops, entry->pl_user_prop,
&propval) == 0) {
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &strval) == 0);
if (strlen(strval) > entry->pl_width)
entry->pl_width = strlen(strval);
}
if (received && zfs_prop_get_recvd(zhp,
entry->pl_user_prop,
buf, sizeof (buf), literal) == 0)
if (strlen(buf) > entry->pl_recvd_width)
entry->pl_recvd_width = strlen(buf);
}
}
return (0);
}
void
zfs_prune_proplist(zfs_handle_t *zhp, uint8_t *props)
{
nvpair_t *curr;
nvpair_t *next;
/*
* Keep a reference to the props-table against which we prune the
* properties.
*/
zhp->zfs_props_table = props;
curr = nvlist_next_nvpair(zhp->zfs_props, NULL);
while (curr) {
zfs_prop_t zfs_prop = zfs_name_to_prop(nvpair_name(curr));
next = nvlist_next_nvpair(zhp->zfs_props, curr);
/*
* User properties will result in ZPROP_INVAL, and since we
* only know how to prune standard ZFS properties, we always
* leave these in the list. This can also happen if we
* encounter an unknown DSL property (when running older
* software, for example).
*/
if (zfs_prop != ZPROP_INVAL && props[zfs_prop] == B_FALSE)
(void) nvlist_remove(zhp->zfs_props,
nvpair_name(curr), nvpair_type(curr));
curr = next;
}
}
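/*
 * Common helper for the zfs_smb_acl_*() wrappers below: packs the dataset,
 * path, and resource names into a zfs_cmd_t and issues ZFS_IOC_SMB_ACL.
 */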
static int
zfs_smb_acl_mgmt(libzfs_handle_t *hdl, char *dataset, char *path,
zfs_smb_acl_op_t cmd, char *resource1, char *resource2)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *nvlist = NULL;
int error;
(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
zc.zc_cookie = (uint64_t)cmd;
if (cmd == ZFS_SMB_ACL_RENAME) {
if (nvlist_alloc(&nvlist, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (0);
}
}
switch (cmd) {
case ZFS_SMB_ACL_ADD:
case ZFS_SMB_ACL_REMOVE:
(void) strlcpy(zc.zc_string, resource1, sizeof (zc.zc_string));
break;
case ZFS_SMB_ACL_RENAME:
if (nvlist_add_string(nvlist, ZFS_SMB_ACL_SRC,
resource1) != 0) {
(void) no_memory(hdl);
return (-1);
}
if (nvlist_add_string(nvlist, ZFS_SMB_ACL_TARGET,
resource2) != 0) {
(void) no_memory(hdl);
return (-1);
}
if (zcmd_write_src_nvlist(hdl, &zc, nvlist) != 0) {
nvlist_free(nvlist);
return (-1);
}
break;
case ZFS_SMB_ACL_PURGE:
break;
default:
return (-1);
}
error = ioctl(hdl->libzfs_fd, ZFS_IOC_SMB_ACL, &zc);
nvlist_free(nvlist);
return (error);
}
int
zfs_smb_acl_add(libzfs_handle_t *hdl, char *dataset,
char *path, char *resource)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_ADD,
resource, NULL));
}
int
zfs_smb_acl_remove(libzfs_handle_t *hdl, char *dataset,
char *path, char *resource)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_REMOVE,
resource, NULL));
}
int
zfs_smb_acl_purge(libzfs_handle_t *hdl, char *dataset, char *path)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_PURGE,
NULL, NULL));
}
int
zfs_smb_acl_rename(libzfs_handle_t *hdl, char *dataset, char *path,
char *oldname, char *newname)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_RENAME,
oldname, newname));
}
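/*
 * Iterate over the {user|group|project} space or quota accounting of the
 * given type, invoking 'func' once per (domain, rid, space) entry.
 */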
int
zfs_userspace(zfs_handle_t *zhp, zfs_userquota_prop_t type,
zfs_userspace_cb_t func, void *arg)
{
zfs_cmd_t zc = {"\0"};
zfs_useracct_t buf[100];
libzfs_handle_t *hdl = zhp->zfs_hdl;
int ret;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_objset_type = type;
zc.zc_nvlist_dst = (uintptr_t)buf;
for (;;) {
zfs_useracct_t *zua = buf;
zc.zc_nvlist_dst_size = sizeof (buf);
if (zfs_ioctl(hdl, ZFS_IOC_USERSPACE_MANY, &zc) != 0) {
if ((errno == ENOTSUP &&
(type == ZFS_PROP_USEROBJUSED ||
type == ZFS_PROP_GROUPOBJUSED ||
type == ZFS_PROP_USEROBJQUOTA ||
type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_PROJECTOBJUSED ||
type == ZFS_PROP_PROJECTOBJQUOTA ||
type == ZFS_PROP_PROJECTUSED ||
type == ZFS_PROP_PROJECTQUOTA)))
break;
return (zfs_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN,
"cannot get used/quota for %s"), zc.zc_name));
}
if (zc.zc_nvlist_dst_size == 0)
break;
while (zc.zc_nvlist_dst_size > 0) {
if ((ret = func(arg, zua->zu_domain, zua->zu_rid,
zua->zu_space)) != 0)
return (ret);
zua++;
zc.zc_nvlist_dst_size -= sizeof (zfs_useracct_t);
}
}
return (0);
}
struct holdarg {
nvlist_t *nvl;
const char *snapname;
const char *tag;
boolean_t recursive;
int error;
};
static int
zfs_hold_one(zfs_handle_t *zhp, void *arg)
{
struct holdarg *ha = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
ha->snapname) >= sizeof (name))
return (EINVAL);
if (lzc_exists(name))
fnvlist_add_string(ha->nvl, name, ha->tag);
if (ha->recursive)
rv = zfs_iter_filesystems(zhp, zfs_hold_one, ha);
zfs_close(zhp);
return (rv);
}
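/*
 * Place a hold with the given tag on snapshot 'snapname' of 'zhp' and, if
 * recursive, on the matching snapshots of all descendant filesystems. The
 * holds themselves are applied by zfs_hold_nvl() below.
 */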
int
zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag,
boolean_t recursive, int cleanup_fd)
{
int ret;
struct holdarg ha;
ha.nvl = fnvlist_alloc();
ha.snapname = snapname;
ha.tag = tag;
ha.recursive = recursive;
(void) zfs_hold_one(zfs_handle_dup(zhp), &ha);
if (nvlist_empty(ha.nvl)) {
char errbuf[1024];
fnvlist_free(ha.nvl);
ret = ENOENT;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot hold snapshot '%s@%s'"),
zhp->zfs_name, snapname);
(void) zfs_standard_error(zhp->zfs_hdl, ret, errbuf);
return (ret);
}
ret = zfs_hold_nvl(zhp, cleanup_fd, ha.nvl);
fnvlist_free(ha.nvl);
return (ret);
}
int
zfs_hold_nvl(zfs_handle_t *zhp, int cleanup_fd, nvlist_t *holds)
{
int ret;
nvlist_t *errors;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
nvpair_t *elem;
errors = NULL;
ret = lzc_hold(holds, cleanup_fd, &errors);
if (ret == 0) {
/* There may be errors even in the success case. */
fnvlist_free(errors);
return (0);
}
if (nvlist_empty(errors)) {
/* no hold-specific errors */
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot hold"));
switch (ret) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, ret, errbuf);
}
}
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot hold snapshot '%s'"), nvpair_name(elem));
switch (fnvpair_value_int32(elem)) {
case E2BIG:
/*
* Temporary tags wind up having the ds object id
* prepended. So even if we passed the length check
* above, it's still possible for the tag to wind
* up being slightly too long.
*/
(void) zfs_error(hdl, EZFS_TAGTOOLONG, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case EEXIST:
(void) zfs_error(hdl, EZFS_REFTAG_HOLD, errbuf);
break;
default:
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
}
}
fnvlist_free(errors);
return (ret);
}
static int
zfs_release_one(zfs_handle_t *zhp, void *arg)
{
struct holdarg *ha = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
nvlist_t *existing_holds;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
ha->snapname) >= sizeof (name)) {
ha->error = EINVAL;
rv = EINVAL;
}
if (lzc_get_holds(name, &existing_holds) != 0) {
ha->error = ENOENT;
} else if (!nvlist_exists(existing_holds, ha->tag)) {
ha->error = ESRCH;
} else {
nvlist_t *torelease = fnvlist_alloc();
fnvlist_add_boolean(torelease, ha->tag);
fnvlist_add_nvlist(ha->nvl, name, torelease);
fnvlist_free(torelease);
}
if (ha->recursive)
rv = zfs_iter_filesystems(zhp, zfs_release_one, ha);
zfs_close(zhp);
return (rv);
}
int
zfs_release(zfs_handle_t *zhp, const char *snapname, const char *tag,
boolean_t recursive)
{
int ret;
struct holdarg ha;
nvlist_t *errors = NULL;
nvpair_t *elem;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
ha.nvl = fnvlist_alloc();
ha.snapname = snapname;
ha.tag = tag;
ha.recursive = recursive;
ha.error = 0;
(void) zfs_release_one(zfs_handle_dup(zhp), &ha);
if (nvlist_empty(ha.nvl)) {
fnvlist_free(ha.nvl);
ret = ha.error;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot release hold from snapshot '%s@%s'"),
zhp->zfs_name, snapname);
if (ret == ESRCH) {
(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
} else {
(void) zfs_standard_error(hdl, ret, errbuf);
}
return (ret);
}
ret = lzc_release(ha.nvl, &errors);
fnvlist_free(ha.nvl);
if (ret == 0) {
/* There may be errors even in the success case. */
fnvlist_free(errors);
return (0);
}
if (nvlist_empty(errors)) {
/* no hold-specific errors */
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot release"));
switch (errno) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
default:
(void) zfs_standard_error(hdl, errno, errbuf);
}
}
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot release hold from snapshot '%s'"),
nvpair_name(elem));
switch (fnvpair_value_int32(elem)) {
case ESRCH:
(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
}
}
fnvlist_free(errors);
return (ret);
}
int
zfs_get_fsacl(zfs_handle_t *zhp, nvlist_t **nvl)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
int nvsz = 2048;
void *nvbuf;
int err = 0;
char errbuf[1024];
assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
tryagain:
nvbuf = malloc(nvsz);
if (nvbuf == NULL) {
err = (zfs_error(hdl, EZFS_NOMEM, strerror(errno)));
goto out;
}
zc.zc_nvlist_dst_size = nvsz;
zc.zc_nvlist_dst = (uintptr_t)nvbuf;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_GET_FSACL, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get permissions on '%s'"),
zc.zc_name);
switch (errno) {
case ENOMEM:
free(nvbuf);
nvsz = zc.zc_nvlist_dst_size;
goto tryagain;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error(hdl, errno, errbuf);
break;
}
} else {
/* success */
int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0);
if (rc) {
err = zfs_standard_error_fmt(hdl, rc, dgettext(
TEXT_DOMAIN, "cannot get permissions on '%s'"),
zc.zc_name);
}
}
free(nvbuf);
out:
return (err);
}
int
zfs_set_fsacl(zfs_handle_t *zhp, boolean_t un, nvlist_t *nvl)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
char *nvbuf;
char errbuf[1024];
size_t nvsz;
int err;
assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
err = nvlist_size(nvl, &nvsz, NV_ENCODE_NATIVE);
assert(err == 0);
nvbuf = malloc(nvsz);
err = nvlist_pack(nvl, &nvbuf, &nvsz, NV_ENCODE_NATIVE, 0);
assert(err == 0);
zc.zc_nvlist_src_size = nvsz;
zc.zc_nvlist_src = (uintptr_t)nvbuf;
zc.zc_perm_action = un;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_SET_FSACL, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set permissions on '%s'"),
zc.zc_name);
switch (errno) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
free(nvbuf);
return (err);
}
int
zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl)
{
int err;
char errbuf[1024];
err = lzc_get_holds(zhp->zfs_name, nvl);
if (err != 0) {
libzfs_handle_t *hdl = zhp->zfs_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"),
zhp->zfs_name);
switch (err) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
return (err);
}
/*
* The theory of raidz space accounting
*
* The "referenced" property of RAIDZ vdevs is scaled such that a 128KB block
* will "reference" 128KB, even though it allocates more than that, to store the
* parity information (and perhaps skip sectors). This concept of the
* "referenced" (and other DMU space accounting) being lower than the allocated
* space by a constant factor is called "raidz deflation."
*
* As mentioned above, the constant factor for raidz deflation assumes a 128KB
* block size. However, zvols typically have a much smaller block size (default
* 8KB). These smaller blocks may require proportionally much more parity
* information (and perhaps skip sectors). In this case, the change to the
* "referenced" property may be much more than the logical block size.
*
* Suppose a raidz vdev has 5 disks with ashift=12. A 128k block may be written
* as follows.
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | D8 | D16 | D24 |
* | P1 | D1 | D9 | D17 | D25 |
* | P2 | D2 | D10 | D18 | D26 |
* | P3 | D3 | D11 | D19 | D27 |
* | P4 | D4 | D12 | D20 | D28 |
* | P5 | D5 | D13 | D21 | D29 |
* | P6 | D6 | D14 | D22 | D30 |
* | P7 | D7 | D15 | D23 | D31 |
* +-------+-------+-------+-------+-------+
*
* Above, notice that 160k was allocated: 8 x 4k parity sectors + 32 x 4k data
* sectors. The dataset's referenced will increase by 128k and the pool's
* allocated and free properties will be adjusted by 160k.
*
* A 4k block written to the same raidz vdev will require two 4k sectors. The
* blank cells represent unallocated space.
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | | | |
* +-------+-------+-------+-------+-------+
*
* Above, notice that the 4k block required one sector for parity and another
* for data. vdev_raidz_asize() will return 8k and as such the pool's allocated
* and free properties will be adjusted by 8k. The dataset will not be charged
* 8k. Rather, it will be charged a value that is scaled according to the
* overhead of the 128k block on the same vdev. This 8k allocation will be
* charged 8k * 128k / 160k. 128k is from SPA_OLD_MAXBLOCKSIZE and 160k is as
* calculated in the 128k block example above.
*
* Every raidz allocation is sized to be a multiple of nparity+1 sectors. That
* is, every raidz1 allocation will be a multiple of 2 sectors, raidz2
* allocations are a multiple of 3 sectors, and raidz3 allocations are a
 * multiple of 4 sectors. When a block does not fill the required number of
* sectors, skip blocks (sectors) are used.
*
* An 8k block being written to a raidz vdev may be written as follows:
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | D1 | S0 | |
* +-------+-------+-------+-------+-------+
*
* In order to maintain the nparity+1 allocation size, a skip block (S0) was
* added. For this 8k block, the pool's allocated and free properties are
* adjusted by 16k and the dataset's referenced is increased by 16k * 128k /
* 160k. Again, 128k is from SPA_OLD_MAXBLOCKSIZE and 160k is as calculated in
* the 128k block example above.
*
* The situation is slightly different for dRAID since the minimum allocation
* size is the full group width. The same 8K block above would be written as
* follows in a dRAID group:
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | D1 | S0 | S1 |
* +-------+-------+-------+-------+-------+
*
* Compression may lead to a variety of block sizes being written for the same
* volume or file. There is no clear way to reserve just the amount of space
* that will be required, so the worst case (no compression) is assumed.
* Note that metadata blocks will typically be compressed, so the reservation
* size returned by zvol_volsize_to_reservation() will generally be slightly
* larger than the maximum that the volume can reference.
*/
/*
* Derived from function of same name in module/zfs/vdev_raidz.c. Returns the
* amount of space (in bytes) that will be allocated for the specified block
* size. Note that the "referenced" space accounted will be less than this, but
* not necessarily equal to "blksize", due to RAIDZ deflation.
*/
static uint64_t
vdev_raidz_asize(uint64_t ndisks, uint64_t nparity, uint64_t ashift,
uint64_t blksize)
{
uint64_t asize, ndata;
ASSERT3U(ndisks, >, nparity);
ndata = ndisks - nparity;
asize = ((blksize - 1) >> ashift) + 1;
asize += nparity * ((asize + ndata - 1) / ndata);
asize = roundup(asize, nparity + 1) << ashift;
return (asize);
}
/*
* Derived from function of same name in module/zfs/vdev_draid.c. Returns the
* amount of space (in bytes) that will be allocated for the specified block
* size.
*/
static uint64_t
vdev_draid_asize(uint64_t ndisks, uint64_t nparity, uint64_t ashift,
uint64_t blksize)
{
ASSERT3U(ndisks, >, nparity);
uint64_t ndata = ndisks - nparity;
uint64_t rows = ((blksize - 1) / (ndata << ashift)) + 1;
uint64_t asize = (rows * ndisks) << ashift;
return (asize);
}
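/*
 * Illustrative sketch (not part of the upstream change): a quick numeric
 * check of the theory comment above using the helpers defined in this file.
 * For a 5-wide raidz1 with ashift=12, an 8K block allocates 16K while the
 * "typical" 128K block allocates 160K, so the dataset is charged
 * 16K * 128K / 160K (about 12.8K). The function name is hypothetical.
 */
static uint64_t
example_raidz1_8k_charge(void)
{
	uint64_t tsize = vdev_raidz_asize(5, 1, 12, SPA_OLD_MAXBLOCKSIZE);
	uint64_t asize = vdev_raidz_asize(5, 1, 12, 8192);

	/* 16384 * 131072 / 163840 == 13107 bytes (~12.8K) */
	return (asize * SPA_OLD_MAXBLOCKSIZE / tsize);
}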
/*
* Determine how much space will be allocated if it lands on the most space-
* inefficient top-level vdev. Returns the size in bytes required to store one
* copy of the volume data. See theory comment above.
*/
static uint64_t
volsize_from_vdevs(zpool_handle_t *zhp, uint64_t nblocks, uint64_t blksize)
{
nvlist_t *config, *tree, **vdevs;
uint_t nvdevs;
uint64_t ret = 0;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&vdevs, &nvdevs) != 0) {
return (nblocks * blksize);
}
for (int v = 0; v < nvdevs; v++) {
char *type;
uint64_t nparity, ashift, asize, tsize;
uint64_t volsize;
if (nvlist_lookup_string(vdevs[v], ZPOOL_CONFIG_TYPE,
&type) != 0)
continue;
if (strcmp(type, VDEV_TYPE_RAIDZ) != 0 &&
strcmp(type, VDEV_TYPE_DRAID) != 0)
continue;
if (nvlist_lookup_uint64(vdevs[v],
ZPOOL_CONFIG_NPARITY, &nparity) != 0)
continue;
if (nvlist_lookup_uint64(vdevs[v],
ZPOOL_CONFIG_ASHIFT, &ashift) != 0)
continue;
if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
nvlist_t **disks;
uint_t ndisks;
if (nvlist_lookup_nvlist_array(vdevs[v],
ZPOOL_CONFIG_CHILDREN, &disks, &ndisks) != 0)
continue;
/* allocation size for the "typical" 128k block */
tsize = vdev_raidz_asize(ndisks, nparity, ashift,
SPA_OLD_MAXBLOCKSIZE);
/* allocation size for the blksize block */
asize = vdev_raidz_asize(ndisks, nparity, ashift,
blksize);
} else {
uint64_t ndata;
if (nvlist_lookup_uint64(vdevs[v],
ZPOOL_CONFIG_DRAID_NDATA, &ndata) != 0)
continue;
/* allocation size for the "typical" 128k block */
tsize = vdev_draid_asize(ndata + nparity, nparity,
ashift, SPA_OLD_MAXBLOCKSIZE);
/* allocation size for the blksize block */
asize = vdev_draid_asize(ndata + nparity, nparity,
ashift, blksize);
}
/*
* Scale this size down as a ratio of 128k / tsize.
* See theory statement above.
*/
volsize = nblocks * asize * SPA_OLD_MAXBLOCKSIZE / tsize;
if (volsize > ret) {
ret = volsize;
}
}
if (ret == 0) {
ret = nblocks * blksize;
}
return (ret);
}
/*
* Convert the zvol's volume size to an appropriate reservation. See theory
* comment above.
*
* Note: If this routine is updated, it is necessary to update the ZFS test
* suite's shell version in reservation.shlib.
*/
uint64_t
zvol_volsize_to_reservation(zpool_handle_t *zph, uint64_t volsize,
nvlist_t *props)
{
uint64_t numdb;
uint64_t nblocks, volblocksize;
int ncopies;
char *strval;
if (nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_COPIES), &strval) == 0)
ncopies = atoi(strval);
else
ncopies = 1;
if (nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&volblocksize) != 0)
volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
nblocks = volsize / volblocksize;
/*
* Metadata defaults to using 128k blocks, not volblocksize blocks. For
* this reason, only the data blocks are scaled based on vdev config.
*/
volsize = volsize_from_vdevs(zph, nblocks, volblocksize);
/* start with metadnode L0-L6 */
numdb = 7;
/* calculate number of indirects */
while (nblocks > 1) {
nblocks += DNODES_PER_LEVEL - 1;
nblocks /= DNODES_PER_LEVEL;
numdb += nblocks;
}
numdb *= MIN(SPA_DVAS_PER_BP, ncopies + 1);
volsize *= ncopies;
/*
* this is exactly DN_MAX_INDBLKSHIFT when metadata isn't
* compressed, but in practice they compress down to about
* 1100 bytes
*/
numdb *= 1ULL << DN_MAX_INDBLKSHIFT;
volsize += numdb;
return (volsize);
}
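/*
 * Illustrative usage sketch (not part of the upstream change): computing the
 * reservation for a 10G zvol with default properties against an open pool
 * handle. The function name is hypothetical.
 */
static uint64_t
example_volsize_reservation(zpool_handle_t *zph)
{
	nvlist_t *props = fnvlist_alloc();
	uint64_t resv;

	/* no volblocksize or copies set, so the defaults above apply */
	resv = zvol_volsize_to_reservation(zph, 10ULL << 30, props);
	fnvlist_free(props);
	return (resv);
}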
/*
* Wait for the given activity and return the status of the wait (whether or not
* any waiting was done) in the 'waited' parameter. Non-existent fses are
* reported via the 'missing' parameter, rather than by printing an error
* message. This is convenient when this function is called in a loop over a
* long period of time (as it is, for example, by zfs's wait cmd). In that
* scenario, a fs being exported or destroyed should be considered a normal
* event, so we don't want to print an error when we find that the fs doesn't
* exist.
*/
int
zfs_wait_status(zfs_handle_t *zhp, zfs_wait_activity_t activity,
boolean_t *missing, boolean_t *waited)
{
int error = lzc_wait_fs(zhp->zfs_name, activity, waited);
*missing = (error == ENOENT);
if (*missing)
return (0);
if (error != 0) {
(void) zfs_standard_error_fmt(zhp->zfs_hdl, error,
dgettext(TEXT_DOMAIN, "error waiting in fs '%s'"),
zhp->zfs_name);
}
return (error);
}
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_util.c b/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
index 68e97e4830d8..88d6561a5fb4 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
@@ -1,2088 +1,2083 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2020 Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2020 The FreeBSD Foundation
*
* Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* Internal utility routines for the ZFS library.
*/
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <math.h>
#if LIBFETCH_DYNAMIC
#include <dlfcn.h>
#endif
#include <sys/stat.h>
#include <sys/mnttab.h>
#include <sys/mntent.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include "libzfs_impl.h"
#include "zfs_prop.h"
#include "zfeature_common.h"
#include <zfs_fletcher.h>
#include <libzutil.h>
/*
* We only care about the scheme in order to match the scheme
* with the handler. Each handler should validate the full URI
* as necessary.
*/
#define URI_REGEX "^\\([A-Za-z][A-Za-z0-9+.\\-]*\\):"
int
libzfs_errno(libzfs_handle_t *hdl)
{
return (hdl->libzfs_error);
}
const char *
libzfs_error_action(libzfs_handle_t *hdl)
{
return (hdl->libzfs_action);
}
const char *
libzfs_error_description(libzfs_handle_t *hdl)
{
if (hdl->libzfs_desc[0] != '\0')
return (hdl->libzfs_desc);
switch (hdl->libzfs_error) {
case EZFS_NOMEM:
return (dgettext(TEXT_DOMAIN, "out of memory"));
case EZFS_BADPROP:
return (dgettext(TEXT_DOMAIN, "invalid property value"));
case EZFS_PROPREADONLY:
return (dgettext(TEXT_DOMAIN, "read-only property"));
case EZFS_PROPTYPE:
return (dgettext(TEXT_DOMAIN, "property doesn't apply to "
"datasets of this type"));
case EZFS_PROPNONINHERIT:
return (dgettext(TEXT_DOMAIN, "property cannot be inherited"));
case EZFS_PROPSPACE:
return (dgettext(TEXT_DOMAIN, "invalid quota or reservation"));
case EZFS_BADTYPE:
return (dgettext(TEXT_DOMAIN, "operation not applicable to "
"datasets of this type"));
case EZFS_BUSY:
return (dgettext(TEXT_DOMAIN, "pool or dataset is busy"));
case EZFS_EXISTS:
return (dgettext(TEXT_DOMAIN, "pool or dataset exists"));
case EZFS_NOENT:
return (dgettext(TEXT_DOMAIN, "no such pool or dataset"));
case EZFS_BADSTREAM:
return (dgettext(TEXT_DOMAIN, "invalid backup stream"));
case EZFS_DSREADONLY:
return (dgettext(TEXT_DOMAIN, "dataset is read-only"));
case EZFS_VOLTOOBIG:
return (dgettext(TEXT_DOMAIN, "volume size exceeds limit for "
"this system"));
case EZFS_INVALIDNAME:
return (dgettext(TEXT_DOMAIN, "invalid name"));
case EZFS_BADRESTORE:
return (dgettext(TEXT_DOMAIN, "unable to restore to "
"destination"));
case EZFS_BADBACKUP:
return (dgettext(TEXT_DOMAIN, "backup failed"));
case EZFS_BADTARGET:
return (dgettext(TEXT_DOMAIN, "invalid target vdev"));
case EZFS_NODEVICE:
return (dgettext(TEXT_DOMAIN, "no such device in pool"));
case EZFS_BADDEV:
return (dgettext(TEXT_DOMAIN, "invalid device"));
case EZFS_NOREPLICAS:
return (dgettext(TEXT_DOMAIN, "no valid replicas"));
case EZFS_RESILVERING:
return (dgettext(TEXT_DOMAIN, "currently resilvering"));
case EZFS_BADVERSION:
return (dgettext(TEXT_DOMAIN, "unsupported version or "
"feature"));
case EZFS_POOLUNAVAIL:
return (dgettext(TEXT_DOMAIN, "pool is unavailable"));
case EZFS_DEVOVERFLOW:
return (dgettext(TEXT_DOMAIN, "too many devices in one vdev"));
case EZFS_BADPATH:
return (dgettext(TEXT_DOMAIN, "must be an absolute path"));
case EZFS_CROSSTARGET:
return (dgettext(TEXT_DOMAIN, "operation crosses datasets or "
"pools"));
case EZFS_ZONED:
return (dgettext(TEXT_DOMAIN, "dataset in use by local zone"));
case EZFS_MOUNTFAILED:
return (dgettext(TEXT_DOMAIN, "mount failed"));
case EZFS_UMOUNTFAILED:
return (dgettext(TEXT_DOMAIN, "unmount failed"));
case EZFS_UNSHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "NFS share removal failed"));
case EZFS_SHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "NFS share creation failed"));
case EZFS_UNSHARESMBFAILED:
return (dgettext(TEXT_DOMAIN, "SMB share removal failed"));
case EZFS_SHARESMBFAILED:
return (dgettext(TEXT_DOMAIN, "SMB share creation failed"));
case EZFS_PERM:
return (dgettext(TEXT_DOMAIN, "permission denied"));
case EZFS_NOSPC:
return (dgettext(TEXT_DOMAIN, "out of space"));
case EZFS_FAULT:
return (dgettext(TEXT_DOMAIN, "bad address"));
case EZFS_IO:
return (dgettext(TEXT_DOMAIN, "I/O error"));
case EZFS_INTR:
return (dgettext(TEXT_DOMAIN, "signal received"));
case EZFS_ISSPARE:
return (dgettext(TEXT_DOMAIN, "device is reserved as a hot "
"spare"));
case EZFS_INVALCONFIG:
return (dgettext(TEXT_DOMAIN, "invalid vdev configuration"));
case EZFS_RECURSIVE:
return (dgettext(TEXT_DOMAIN, "recursive dataset dependency"));
case EZFS_NOHISTORY:
return (dgettext(TEXT_DOMAIN, "no history available"));
case EZFS_POOLPROPS:
return (dgettext(TEXT_DOMAIN, "failed to retrieve "
"pool properties"));
case EZFS_POOL_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this type of pool"));
case EZFS_POOL_INVALARG:
return (dgettext(TEXT_DOMAIN, "invalid argument for "
"this pool operation"));
case EZFS_NAMETOOLONG:
return (dgettext(TEXT_DOMAIN, "dataset name is too long"));
case EZFS_OPENFAILED:
return (dgettext(TEXT_DOMAIN, "open failed"));
case EZFS_NOCAP:
return (dgettext(TEXT_DOMAIN,
"disk capacity information could not be retrieved"));
case EZFS_LABELFAILED:
return (dgettext(TEXT_DOMAIN, "write of label failed"));
case EZFS_BADWHO:
return (dgettext(TEXT_DOMAIN, "invalid user/group"));
case EZFS_BADPERM:
return (dgettext(TEXT_DOMAIN, "invalid permission"));
case EZFS_BADPERMSET:
return (dgettext(TEXT_DOMAIN, "invalid permission set name"));
case EZFS_NODELEGATION:
return (dgettext(TEXT_DOMAIN, "delegated administration is "
"disabled on pool"));
case EZFS_BADCACHE:
return (dgettext(TEXT_DOMAIN, "invalid or missing cache file"));
case EZFS_ISL2CACHE:
return (dgettext(TEXT_DOMAIN, "device is in use as a cache"));
case EZFS_VDEVNOTSUP:
return (dgettext(TEXT_DOMAIN, "vdev specification is not "
"supported"));
case EZFS_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this dataset"));
case EZFS_IOC_NOTSUPPORTED:
return (dgettext(TEXT_DOMAIN, "operation not supported by "
"zfs kernel module"));
case EZFS_ACTIVE_SPARE:
return (dgettext(TEXT_DOMAIN, "pool has active shared spare "
"device"));
case EZFS_UNPLAYED_LOGS:
return (dgettext(TEXT_DOMAIN, "log device has unplayed intent "
"logs"));
case EZFS_REFTAG_RELE:
return (dgettext(TEXT_DOMAIN, "no such tag on this dataset"));
case EZFS_REFTAG_HOLD:
return (dgettext(TEXT_DOMAIN, "tag already exists on this "
"dataset"));
case EZFS_TAGTOOLONG:
return (dgettext(TEXT_DOMAIN, "tag too long"));
case EZFS_PIPEFAILED:
return (dgettext(TEXT_DOMAIN, "pipe create failed"));
case EZFS_THREADCREATEFAILED:
return (dgettext(TEXT_DOMAIN, "thread create failed"));
case EZFS_POSTSPLIT_ONLINE:
return (dgettext(TEXT_DOMAIN, "disk was split from this pool "
"into a new one"));
case EZFS_SCRUB_PAUSED:
return (dgettext(TEXT_DOMAIN, "scrub is paused; "
"use 'zpool scrub' to resume"));
case EZFS_SCRUBBING:
return (dgettext(TEXT_DOMAIN, "currently scrubbing; "
"use 'zpool scrub -s' to cancel current scrub"));
case EZFS_NO_SCRUB:
return (dgettext(TEXT_DOMAIN, "there is no active scrub"));
case EZFS_DIFF:
return (dgettext(TEXT_DOMAIN, "unable to generate diffs"));
case EZFS_DIFFDATA:
return (dgettext(TEXT_DOMAIN, "invalid diff data"));
case EZFS_POOLREADONLY:
return (dgettext(TEXT_DOMAIN, "pool is read-only"));
case EZFS_NO_PENDING:
return (dgettext(TEXT_DOMAIN, "operation is not "
"in progress"));
case EZFS_CHECKPOINT_EXISTS:
return (dgettext(TEXT_DOMAIN, "checkpoint exists"));
case EZFS_DISCARDING_CHECKPOINT:
return (dgettext(TEXT_DOMAIN, "currently discarding "
"checkpoint"));
case EZFS_NO_CHECKPOINT:
return (dgettext(TEXT_DOMAIN, "checkpoint does not exist"));
case EZFS_DEVRM_IN_PROGRESS:
return (dgettext(TEXT_DOMAIN, "device removal in progress"));
case EZFS_VDEV_TOO_BIG:
return (dgettext(TEXT_DOMAIN, "device exceeds supported size"));
case EZFS_ACTIVE_POOL:
return (dgettext(TEXT_DOMAIN, "pool is imported on a "
"different host"));
case EZFS_CRYPTOFAILED:
return (dgettext(TEXT_DOMAIN, "encryption failure"));
case EZFS_TOOMANY:
return (dgettext(TEXT_DOMAIN, "argument list too long"));
case EZFS_INITIALIZING:
return (dgettext(TEXT_DOMAIN, "currently initializing"));
case EZFS_NO_INITIALIZE:
return (dgettext(TEXT_DOMAIN, "there is no active "
"initialization"));
case EZFS_WRONG_PARENT:
return (dgettext(TEXT_DOMAIN, "invalid parent dataset"));
case EZFS_TRIMMING:
return (dgettext(TEXT_DOMAIN, "currently trimming"));
case EZFS_NO_TRIM:
return (dgettext(TEXT_DOMAIN, "there is no active trim"));
case EZFS_TRIM_NOTSUP:
return (dgettext(TEXT_DOMAIN, "trim operations are not "
"supported by this device"));
case EZFS_NO_RESILVER_DEFER:
return (dgettext(TEXT_DOMAIN, "this action requires the "
"resilver_defer feature"));
case EZFS_EXPORT_IN_PROGRESS:
return (dgettext(TEXT_DOMAIN, "pool export in progress"));
case EZFS_REBUILDING:
return (dgettext(TEXT_DOMAIN, "currently sequentially "
"resilvering"));
case EZFS_UNKNOWN:
return (dgettext(TEXT_DOMAIN, "unknown error"));
default:
assert(hdl->libzfs_error == 0);
return (dgettext(TEXT_DOMAIN, "no error"));
}
}
-/*PRINTFLIKE2*/
void
zfs_error_aux(libzfs_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) vsnprintf(hdl->libzfs_desc, sizeof (hdl->libzfs_desc),
fmt, ap);
hdl->libzfs_desc_active = 1;
va_end(ap);
}
static void
zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap)
{
(void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action),
fmt, ap);
hdl->libzfs_error = error;
if (hdl->libzfs_desc_active)
hdl->libzfs_desc_active = 0;
else
hdl->libzfs_desc[0] = '\0';
if (hdl->libzfs_printerr) {
if (error == EZFS_UNKNOWN) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal "
"error: %s: %s\n"), hdl->libzfs_action,
libzfs_error_description(hdl));
abort();
}
(void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action,
libzfs_error_description(hdl));
if (error == EZFS_NOMEM)
exit(1);
}
}
int
zfs_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zfs_error_fmt(hdl, error, "%s", msg));
}
-/*PRINTFLIKE3*/
int
zfs_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
zfs_verror(hdl, error, fmt, ap);
va_end(ap);
return (-1);
}
static int
zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt,
va_list ap)
{
switch (error) {
case EPERM:
case EACCES:
zfs_verror(hdl, EZFS_PERM, fmt, ap);
return (-1);
case ECANCELED:
zfs_verror(hdl, EZFS_NODELEGATION, fmt, ap);
return (-1);
case EIO:
zfs_verror(hdl, EZFS_IO, fmt, ap);
return (-1);
case EFAULT:
zfs_verror(hdl, EZFS_FAULT, fmt, ap);
return (-1);
case EINTR:
zfs_verror(hdl, EZFS_INTR, fmt, ap);
return (-1);
}
return (0);
}
int
zfs_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zfs_standard_error_fmt(hdl, error, "%s", msg));
}
-/*PRINTFLIKE3*/
int
zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (zfs_common_error(hdl, error, fmt, ap) != 0) {
va_end(ap);
return (-1);
}
switch (error) {
case ENXIO:
case ENODEV:
case EPIPE:
zfs_verror(hdl, EZFS_IO, fmt, ap);
break;
case ENOENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset does not exist"));
zfs_verror(hdl, EZFS_NOENT, fmt, ap);
break;
case ENOSPC:
case EDQUOT:
zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
break;
case EEXIST:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
case EROFS:
zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_NAMETOOLONG, fmt, ap);
break;
case ENOTSUP:
zfs_verror(hdl, EZFS_BADVERSION, fmt, ap);
break;
case EAGAIN:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool I/O is currently suspended"));
zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
break;
case EREMOTEIO:
zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
break;
case ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE:
case ZFS_ERR_IOC_CMD_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support this operation. A reboot may "
"be required to enable this operation."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support an option for this operation. "
"A reboot may be required to enable this option."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_REQUIRED:
case ZFS_ERR_IOC_ARG_BADTYPE:
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_WRONG_PARENT:
zfs_verror(hdl, EZFS_WRONG_PARENT, fmt, ap);
break;
case ZFS_ERR_BADPROP:
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
default:
zfs_error_aux(hdl, "%s", strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
break;
}
va_end(ap);
return (-1);
}
void
zfs_setprop_error(libzfs_handle_t *hdl, zfs_prop_t prop, int err,
char *errbuf)
{
switch (err) {
case ENOSPC:
/*
* For quotas and reservations, ENOSPC indicates
* something different; setting a quota or reservation
* doesn't use any disk space.
*/
switch (prop) {
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is less than current used or "
"reserved space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is greater than available space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, err, errbuf);
break;
}
break;
case EBUSY:
(void) zfs_standard_error(hdl, EBUSY, errbuf);
break;
case EROFS:
(void) zfs_error(hdl, EZFS_DSREADONLY, errbuf);
break;
case E2BIG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property value too long"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool and or dataset must be upgraded to set this "
"property or value"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case ERANGE:
if (prop == ZFS_PROP_COMPRESSION ||
prop == ZFS_PROP_DNODESIZE ||
prop == ZFS_PROP_RECORDSIZE) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"bootable datasets"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else if (prop == ZFS_PROP_CHECKSUM ||
prop == ZFS_PROP_DEDUP) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"root pools"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EINVAL:
if (prop == ZPROP_INVAL) {
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case ZFS_ERR_BADPROP:
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
break;
case EACCES:
if (prop == ZFS_PROP_KEYLOCATION) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation may only be set on encryption roots"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
#ifdef _ILP32
if (prop == ZFS_PROP_VOLSIZE) {
(void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf);
break;
}
#endif
/* FALLTHROUGH */
default:
(void) zfs_standard_error(hdl, err, errbuf);
}
}
int
zpool_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zpool_standard_error_fmt(hdl, error, "%s", msg));
}
-/*PRINTFLIKE3*/
int
zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (zfs_common_error(hdl, error, fmt, ap) != 0) {
va_end(ap);
return (-1);
}
switch (error) {
case ENODEV:
zfs_verror(hdl, EZFS_NODEVICE, fmt, ap);
break;
case ENOENT:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "no such pool or dataset"));
zfs_verror(hdl, EZFS_NOENT, fmt, ap);
break;
case EEXIST:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool already exists"));
zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
/* There is no pending operation to cancel */
case ENOTACTIVE:
zfs_verror(hdl, EZFS_NO_PENDING, fmt, ap);
break;
case ENXIO:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is currently unavailable"));
zfs_verror(hdl, EZFS_BADDEV, fmt, ap);
break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_DEVOVERFLOW, fmt, ap);
break;
case ENOTSUP:
zfs_verror(hdl, EZFS_POOL_NOTSUP, fmt, ap);
break;
case EINVAL:
zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap);
break;
case ENOSPC:
case EDQUOT:
zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
return (-1);
case EAGAIN:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool I/O is currently suspended"));
zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
break;
case EROFS:
zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
break;
case EDOM:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"block size out of range or does not match"));
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
case EREMOTEIO:
zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
break;
case ZFS_ERR_CHECKPOINT_EXISTS:
zfs_verror(hdl, EZFS_CHECKPOINT_EXISTS, fmt, ap);
break;
case ZFS_ERR_DISCARDING_CHECKPOINT:
zfs_verror(hdl, EZFS_DISCARDING_CHECKPOINT, fmt, ap);
break;
case ZFS_ERR_NO_CHECKPOINT:
zfs_verror(hdl, EZFS_NO_CHECKPOINT, fmt, ap);
break;
case ZFS_ERR_DEVRM_IN_PROGRESS:
zfs_verror(hdl, EZFS_DEVRM_IN_PROGRESS, fmt, ap);
break;
case ZFS_ERR_VDEV_TOO_BIG:
zfs_verror(hdl, EZFS_VDEV_TOO_BIG, fmt, ap);
break;
case ZFS_ERR_EXPORT_IN_PROGRESS:
zfs_verror(hdl, EZFS_EXPORT_IN_PROGRESS, fmt, ap);
break;
case ZFS_ERR_RESILVER_IN_PROGRESS:
zfs_verror(hdl, EZFS_RESILVERING, fmt, ap);
break;
case ZFS_ERR_REBUILD_IN_PROGRESS:
zfs_verror(hdl, EZFS_REBUILDING, fmt, ap);
break;
case ZFS_ERR_BADPROP:
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
case ZFS_ERR_IOC_CMD_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support this operation. A reboot may "
"be required to enable this operation."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support an option for this operation. "
"A reboot may be required to enable this option."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_REQUIRED:
case ZFS_ERR_IOC_ARG_BADTYPE:
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
default:
zfs_error_aux(hdl, "%s", strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
}
va_end(ap);
return (-1);
}
/*
* Display an out of memory error message and abort the current program.
*/
int
no_memory(libzfs_handle_t *hdl)
{
return (zfs_error(hdl, EZFS_NOMEM, "internal error"));
}
/*
* A safe form of malloc() which will die if the allocation fails.
*/
void *
zfs_alloc(libzfs_handle_t *hdl, size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
(void) no_memory(hdl);
return (data);
}
/*
* A safe form of asprintf() which will die if the allocation fails.
*/
-/*PRINTFLIKE2*/
char *
zfs_asprintf(libzfs_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
char *ret;
int err;
va_start(ap, fmt);
err = vasprintf(&ret, fmt, ap);
va_end(ap);
if (err < 0) {
(void) no_memory(hdl);
ret = NULL;
}
return (ret);
}
/*
* A safe form of realloc(), which also zeroes newly allocated space.
*/
void *
zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize)
{
void *ret;
if ((ret = realloc(ptr, newsize)) == NULL) {
(void) no_memory(hdl);
return (NULL);
}
bzero((char *)ret + oldsize, (newsize - oldsize));
return (ret);
}
/*
* A safe form of strdup() which will die if the allocation fails.
*/
char *
zfs_strdup(libzfs_handle_t *hdl, const char *str)
{
char *ret;
if ((ret = strdup(str)) == NULL)
(void) no_memory(hdl);
return (ret);
}
void
libzfs_print_on_error(libzfs_handle_t *hdl, boolean_t printerr)
{
hdl->libzfs_printerr = printerr;
}
/*
* Read lines from an open file descriptor and store them in an array of
* strings until EOF. lines[] will be allocated and populated with all the
* lines read. All newlines are replaced with NULL terminators for
* convenience. lines[] must be freed after use with libzfs_free_str_array().
*
* Returns the number of lines read.
*/
static int
libzfs_read_stdout_from_fd(int fd, char **lines[])
{
FILE *fp;
int lines_cnt = 0;
size_t len = 0;
char *line = NULL;
char **tmp_lines = NULL, **tmp;
fp = fdopen(fd, "r");
if (fp == NULL) {
close(fd);
return (0);
}
while (getline(&line, &len, fp) != -1) {
tmp = realloc(tmp_lines, sizeof (*tmp_lines) * (lines_cnt + 1));
if (tmp == NULL) {
/* Return the lines we were able to process */
break;
}
tmp_lines = tmp;
/* Remove newline if not EOF */
if (line[strlen(line) - 1] == '\n')
line[strlen(line) - 1] = '\0';
tmp_lines[lines_cnt] = strdup(line);
if (tmp_lines[lines_cnt] == NULL)
break;
++lines_cnt;
}
free(line);
fclose(fp);
*lines = tmp_lines;
return (lines_cnt);
}
static int
libzfs_run_process_impl(const char *path, char *argv[], char *env[], int flags,
char **lines[], int *lines_cnt)
{
pid_t pid;
int error, devnull_fd;
int link[2];
/*
* Setup a pipe between our child and parent process if we're
* reading stdout.
*/
if (lines != NULL && pipe2(link, O_NONBLOCK | O_CLOEXEC) == -1)
return (-EPIPE);
pid = fork();
if (pid == 0) {
/* Child process */
devnull_fd = open("/dev/null", O_WRONLY | O_CLOEXEC);
if (devnull_fd < 0)
_exit(-1);
if (!(flags & STDOUT_VERBOSE) && (lines == NULL))
(void) dup2(devnull_fd, STDOUT_FILENO);
else if (lines != NULL) {
/* Save the output to lines[] */
dup2(link[1], STDOUT_FILENO);
}
if (!(flags & STDERR_VERBOSE))
(void) dup2(devnull_fd, STDERR_FILENO);
if (flags & NO_DEFAULT_PATH) {
if (env == NULL)
execv(path, argv);
else
execve(path, argv, env);
} else {
if (env == NULL)
execvp(path, argv);
else
execvpe(path, argv, env);
}
_exit(-1);
} else if (pid > 0) {
/* Parent process */
int status;
while ((error = waitpid(pid, &status, 0)) == -1 &&
errno == EINTR)
;
if (error < 0 || !WIFEXITED(status))
return (-1);
if (lines != NULL) {
close(link[1]);
*lines_cnt = libzfs_read_stdout_from_fd(link[0], lines);
}
return (WEXITSTATUS(status));
}
return (-1);
}
int
libzfs_run_process(const char *path, char *argv[], int flags)
{
return (libzfs_run_process_impl(path, argv, NULL, flags, NULL, NULL));
}
/*
* Run a command and store its stdout lines in an array of strings (lines[]).
* lines[] is allocated and populated for you, and the number of lines is set in
* lines_cnt. lines[] must be freed after use with libzfs_free_str_array().
 * All newlines (\n) in lines[] are replaced with NULL terminators for
 * convenience.
*/
int
libzfs_run_process_get_stdout(const char *path, char *argv[], char *env[],
char **lines[], int *lines_cnt)
{
return (libzfs_run_process_impl(path, argv, env, 0, lines, lines_cnt));
}
/*
* Same as libzfs_run_process_get_stdout(), but run without $PATH set. This
* means that *path needs to be the full path to the executable.
*/
int
libzfs_run_process_get_stdout_nopath(const char *path, char *argv[],
char *env[], char **lines[], int *lines_cnt)
{
return (libzfs_run_process_impl(path, argv, env, NO_DEFAULT_PATH,
lines, lines_cnt));
}
/*
* Free an array of strings. Free both the strings contained in the array and
* the array itself.
*/
void
libzfs_free_str_array(char **strs, int count)
{
while (--count >= 0)
free(strs[count]);
free(strs);
}
/*
 * Returns 1 if the environment variable is set to "YES", "yes", "ON", "on", or
* a non-zero number.
*
* Returns 0 otherwise.
*/
int
libzfs_envvar_is_set(char *envvar)
{
char *env = getenv(envvar);
if (env && (strtoul(env, NULL, 0) > 0 ||
(!strncasecmp(env, "YES", 3) && strnlen(env, 4) == 3) ||
(!strncasecmp(env, "ON", 2) && strnlen(env, 3) == 2)))
return (1);
return (0);
}
libzfs_handle_t *
libzfs_init(void)
{
libzfs_handle_t *hdl;
int error;
char *env;
if ((error = libzfs_load_module()) != 0) {
errno = error;
return (NULL);
}
if ((hdl = calloc(1, sizeof (libzfs_handle_t))) == NULL) {
return (NULL);
}
if (regcomp(&hdl->libzfs_urire, URI_REGEX, 0) != 0) {
free(hdl);
return (NULL);
}
if ((hdl->libzfs_fd = open(ZFS_DEV, O_RDWR|O_EXCL|O_CLOEXEC)) < 0) {
free(hdl);
return (NULL);
}
if (libzfs_core_init() != 0) {
(void) close(hdl->libzfs_fd);
free(hdl);
return (NULL);
}
zfs_prop_init();
zpool_prop_init();
zpool_feature_init();
libzfs_mnttab_init(hdl);
fletcher_4_init();
if (getenv("ZFS_PROP_DEBUG") != NULL) {
hdl->libzfs_prop_debug = B_TRUE;
}
if ((env = getenv("ZFS_SENDRECV_MAX_NVLIST")) != NULL) {
if ((error = zfs_nicestrtonum(hdl, env,
&hdl->libzfs_max_nvlist))) {
errno = error;
(void) close(hdl->libzfs_fd);
free(hdl);
return (NULL);
}
} else {
hdl->libzfs_max_nvlist = (SPA_MAXBLOCKSIZE * 4);
}
/*
* For testing, remove some settable properties and features
*/
if (libzfs_envvar_is_set("ZFS_SYSFS_PROP_SUPPORT_TEST")) {
zprop_desc_t *proptbl;
proptbl = zpool_prop_get_table();
proptbl[ZPOOL_PROP_COMMENT].pd_zfs_mod_supported = B_FALSE;
proptbl = zfs_prop_get_table();
proptbl[ZFS_PROP_DNODESIZE].pd_zfs_mod_supported = B_FALSE;
zfeature_info_t *ftbl = spa_feature_table;
ftbl[SPA_FEATURE_LARGE_BLOCKS].fi_zfs_mod_supported = B_FALSE;
}
return (hdl);
}
void
libzfs_fini(libzfs_handle_t *hdl)
{
(void) close(hdl->libzfs_fd);
zpool_free_handles(hdl);
namespace_clear(hdl);
libzfs_mnttab_fini(hdl);
libzfs_core_fini();
regfree(&hdl->libzfs_urire);
fletcher_4_fini();
#if LIBFETCH_DYNAMIC
if (hdl->libfetch != (void *)-1 && hdl->libfetch != NULL)
(void) dlclose(hdl->libfetch);
free(hdl->libfetch_load_error);
#endif
free(hdl);
}
libzfs_handle_t *
zpool_get_handle(zpool_handle_t *zhp)
{
return (zhp->zpool_hdl);
}
libzfs_handle_t *
zfs_get_handle(zfs_handle_t *zhp)
{
return (zhp->zfs_hdl);
}
zpool_handle_t *
zfs_get_pool_handle(const zfs_handle_t *zhp)
{
return (zhp->zpool_hdl);
}
/*
* Given a name, determine whether or not it's a valid path
* (starts with '/' or "./"). If so, walk the mnttab trying
* to match the device number. If not, treat the path as an
* fs/vol/snap/bkmark name.
*/
zfs_handle_t *
zfs_path_to_zhandle(libzfs_handle_t *hdl, const char *path, zfs_type_t argtype)
{
struct stat64 statbuf;
struct extmnttab entry;
if (path[0] != '/' && strncmp(path, "./", strlen("./")) != 0) {
/*
* It's not a valid path, assume it's a name of type 'argtype'.
*/
return (zfs_open(hdl, path, argtype));
}
if (getextmntent(path, &entry, &statbuf) != 0)
return (NULL);
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
(void) fprintf(stderr, gettext("'%s': not a ZFS filesystem\n"),
path);
return (NULL);
}
return (zfs_open(hdl, entry.mnt_special, ZFS_TYPE_FILESYSTEM));
}
/*
* Initialize the zc_nvlist_dst member to prepare for receiving an nvlist from
* an ioctl().
*/
int
zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len)
{
if (len == 0)
len = 256 * 1024;
zc->zc_nvlist_dst_size = len;
zc->zc_nvlist_dst =
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
if (zc->zc_nvlist_dst == 0)
return (-1);
return (0);
}
/*
* Called when an ioctl() which returns an nvlist fails with ENOMEM. This will
* expand the nvlist to the size specified in 'zc_nvlist_dst_size', which was
* filled in by the kernel to indicate the actual required size.
*/
int
zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
free((void *)(uintptr_t)zc->zc_nvlist_dst);
zc->zc_nvlist_dst =
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
if (zc->zc_nvlist_dst == 0)
return (-1);
return (0);
}
/*
* Called to free the src and dst nvlists stored in the command structure.
*/
void
zcmd_free_nvlists(zfs_cmd_t *zc)
{
free((void *)(uintptr_t)zc->zc_nvlist_conf);
free((void *)(uintptr_t)zc->zc_nvlist_src);
free((void *)(uintptr_t)zc->zc_nvlist_dst);
zc->zc_nvlist_conf = 0;
zc->zc_nvlist_src = 0;
zc->zc_nvlist_dst = 0;
}
static int
zcmd_write_nvlist_com(libzfs_handle_t *hdl, uint64_t *outnv, uint64_t *outlen,
nvlist_t *nvl)
{
char *packed;
size_t len;
verify(nvlist_size(nvl, &len, NV_ENCODE_NATIVE) == 0);
if ((packed = zfs_alloc(hdl, len)) == NULL)
return (-1);
verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
*outnv = (uint64_t)(uintptr_t)packed;
*outlen = len;
return (0);
}
int
zcmd_write_conf_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
{
return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_conf,
&zc->zc_nvlist_conf_size, nvl));
}
int
zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
{
return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_src,
&zc->zc_nvlist_src_size, nvl));
}
/*
* Unpacks an nvlist from the ZFS ioctl command structure.
*/
int
zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
{
if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
zc->zc_nvlist_dst_size, nvlp, 0) != 0)
return (no_memory(hdl));
return (0);
}
/*
* ================================================================
* API shared by zfs and zpool property management
* ================================================================
*/
static void
zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
{
zprop_list_t *pl = cbp->cb_proplist;
int i;
char *title;
size_t len;
cbp->cb_first = B_FALSE;
if (cbp->cb_scripted)
return;
/*
* Start with the length of the column headers.
*/
cbp->cb_colwidths[GET_COL_NAME] = strlen(dgettext(TEXT_DOMAIN, "NAME"));
cbp->cb_colwidths[GET_COL_PROPERTY] = strlen(dgettext(TEXT_DOMAIN,
"PROPERTY"));
cbp->cb_colwidths[GET_COL_VALUE] = strlen(dgettext(TEXT_DOMAIN,
"VALUE"));
cbp->cb_colwidths[GET_COL_RECVD] = strlen(dgettext(TEXT_DOMAIN,
"RECEIVED"));
cbp->cb_colwidths[GET_COL_SOURCE] = strlen(dgettext(TEXT_DOMAIN,
"SOURCE"));
/* first property is always NAME */
assert(cbp->cb_proplist->pl_prop ==
((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME : ZFS_PROP_NAME));
/*
* Go through and calculate the widths for each column. For the
* 'source' column, we kludge it up by taking the worst-case scenario of
* inheriting from the longest name. This is acceptable because in the
* majority of cases 'SOURCE' is the last column displayed, and we don't
* use the width anyway. Note that the 'VALUE' column can be oversized,
* if the name of the property is much longer than any values we find.
*/
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* 'PROPERTY' column
*/
if (pl->pl_prop != ZPROP_INVAL) {
const char *propname = (type == ZFS_TYPE_POOL) ?
zpool_prop_to_name(pl->pl_prop) :
zfs_prop_to_name(pl->pl_prop);
len = strlen(propname);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
} else {
len = strlen(pl->pl_user_prop);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
}
/*
* 'VALUE' column. The first property is always the 'name'
* property that was tacked on either by /sbin/zfs's
* zfs_do_get() or when calling zprop_expand_list(), so we
* ignore its width. If the user specified the name property
* to display, then it will be later in the list in any case.
*/
if (pl != cbp->cb_proplist &&
pl->pl_width > cbp->cb_colwidths[GET_COL_VALUE])
cbp->cb_colwidths[GET_COL_VALUE] = pl->pl_width;
/* 'RECEIVED' column. */
if (pl != cbp->cb_proplist &&
pl->pl_recvd_width > cbp->cb_colwidths[GET_COL_RECVD])
cbp->cb_colwidths[GET_COL_RECVD] = pl->pl_recvd_width;
/*
* 'NAME' and 'SOURCE' columns
*/
if (pl->pl_prop == (type == ZFS_TYPE_POOL ? ZPOOL_PROP_NAME :
ZFS_PROP_NAME) &&
pl->pl_width > cbp->cb_colwidths[GET_COL_NAME]) {
cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width;
cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width +
strlen(dgettext(TEXT_DOMAIN, "inherited from"));
}
}
/*
* Now go through and print the headers.
*/
for (i = 0; i < ZFS_GET_NCOLS; i++) {
switch (cbp->cb_columns[i]) {
case GET_COL_NAME:
title = dgettext(TEXT_DOMAIN, "NAME");
break;
case GET_COL_PROPERTY:
title = dgettext(TEXT_DOMAIN, "PROPERTY");
break;
case GET_COL_VALUE:
title = dgettext(TEXT_DOMAIN, "VALUE");
break;
case GET_COL_RECVD:
title = dgettext(TEXT_DOMAIN, "RECEIVED");
break;
case GET_COL_SOURCE:
title = dgettext(TEXT_DOMAIN, "SOURCE");
break;
default:
title = NULL;
}
if (title != NULL) {
if (i == (ZFS_GET_NCOLS - 1) ||
cbp->cb_columns[i + 1] == GET_COL_NONE)
(void) printf("%s", title);
else
(void) printf("%-*s ",
cbp->cb_colwidths[cbp->cb_columns[i]],
title);
}
}
(void) printf("\n");
}
/*
* Display a single line of output, according to the settings in the callback
* structure.
*/
void
zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp,
const char *propname, const char *value, zprop_source_t sourcetype,
const char *source, const char *recvd_value)
{
int i;
const char *str = NULL;
char buf[128];
/*
* Ignore those source types that the user has chosen to ignore.
*/
if ((sourcetype & cbp->cb_sources) == 0)
return;
if (cbp->cb_first)
zprop_print_headers(cbp, cbp->cb_type);
for (i = 0; i < ZFS_GET_NCOLS; i++) {
switch (cbp->cb_columns[i]) {
case GET_COL_NAME:
str = name;
break;
case GET_COL_PROPERTY:
str = propname;
break;
case GET_COL_VALUE:
str = value;
break;
case GET_COL_SOURCE:
switch (sourcetype) {
case ZPROP_SRC_NONE:
str = "-";
break;
case ZPROP_SRC_DEFAULT:
str = "default";
break;
case ZPROP_SRC_LOCAL:
str = "local";
break;
case ZPROP_SRC_TEMPORARY:
str = "temporary";
break;
case ZPROP_SRC_INHERITED:
(void) snprintf(buf, sizeof (buf),
"inherited from %s", source);
str = buf;
break;
case ZPROP_SRC_RECEIVED:
str = "received";
break;
default:
str = NULL;
assert(!"unhandled zprop_source_t");
}
break;
case GET_COL_RECVD:
str = (recvd_value == NULL ? "-" : recvd_value);
break;
default:
continue;
}
if (i == (ZFS_GET_NCOLS - 1) ||
cbp->cb_columns[i + 1] == GET_COL_NONE)
(void) printf("%s", str);
else if (cbp->cb_scripted)
(void) printf("%s\t", str);
else
(void) printf("%-*s ",
cbp->cb_colwidths[cbp->cb_columns[i]],
str);
}
(void) printf("\n");
}
/*
* Given a numeric suffix, convert the value into a number of bits that the
* resulting value must be shifted.
*/
static int
str2shift(libzfs_handle_t *hdl, const char *buf)
{
const char *ends = "BKMGTPEZ";
int i;
if (buf[0] == '\0')
return (0);
for (i = 0; i < strlen(ends); i++) {
if (toupper(buf[0]) == ends[i])
break;
}
if (i == strlen(ends)) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid numeric suffix '%s'"), buf);
return (-1);
}
/*
* Allow 'G' = 'GB' = 'GiB', case-insensitively.
* However, 'BB' and 'BiB' are disallowed.
*/
if (buf[1] == '\0' ||
(toupper(buf[0]) != 'B' &&
((toupper(buf[1]) == 'B' && buf[2] == '\0') ||
(toupper(buf[1]) == 'I' && toupper(buf[2]) == 'B' &&
buf[3] == '\0'))))
return (10 * i);
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid numeric suffix '%s'"), buf);
return (-1);
}
/*
* Convert a string of the form '100G' into a real number. Used when setting
* properties or creating a volume. 'buf' is used to place an extended error
* message for the caller to use.
*/
int
zfs_nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num)
{
char *end;
int shift;
*num = 0;
/* Check to see if this looks like a number. */
if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad numeric value '%s'"), value);
return (-1);
}
/* Rely on strtoull() to process the numeric portion. */
errno = 0;
*num = strtoull(value, &end, 10);
/*
* Check for ERANGE, which indicates that the value is too large to fit
* in a 64-bit value.
*/
if (errno == ERANGE) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
/*
* If we have a decimal value, then do the computation with floating
* point arithmetic. Otherwise, use standard arithmetic.
*/
if (*end == '.') {
double fval = strtod(value, &end);
if ((shift = str2shift(hdl, end)) == -1)
return (-1);
fval *= pow(2, shift);
/*
* UINT64_MAX is not exactly representable as a double.
* The closest representation is UINT64_MAX + 1, so we
* use a >= comparison instead of > for the bounds check.
*/
if (fval >= (double)UINT64_MAX) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
*num = (uint64_t)fval;
} else {
if ((shift = str2shift(hdl, end)) == -1)
return (-1);
/* Check for overflow */
if (shift >= 64 || (*num << shift) >> shift != *num) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
*num <<= shift;
}
return (0);
}
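/*
 * Illustrative sketch (not part of the upstream change) of the conversions
 * performed by zfs_nicestrtonum() and str2shift(): "100G", "100GB", and
 * "100GiB" all parse to 100 * 2^30, and a fractional value such as "1.5K"
 * parses to 1536. The function name is hypothetical.
 */
static void
example_nicestrtonum(libzfs_handle_t *hdl)
{
	uint64_t val;

	if (zfs_nicestrtonum(hdl, "100G", &val) == 0)
		assert(val == 100ULL << 30);
	if (zfs_nicestrtonum(hdl, "1.5K", &val) == 0)
		assert(val == 1536);
}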
/*
* Given a propname=value nvpair to set, parse any numeric properties
* (index, boolean, etc) if they are specified as strings and add the
* resulting nvpair to the returned nvlist.
*
* At the DSL layer, all properties are either 64-bit numbers or strings.
* We want the user to be able to ignore this fact and specify properties
* as native values (numbers, for example) or as strings (to simplify
* command line utilities). This also handles converting index types
* (compression, checksum, etc) from strings to their on-disk index.
*/
int
zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop,
zfs_type_t type, nvlist_t *ret, char **svalp, uint64_t *ivalp,
const char *errbuf)
{
data_type_t datatype = nvpair_type(elem);
zprop_type_t proptype;
const char *propname;
char *value;
boolean_t isnone = B_FALSE;
boolean_t isauto = B_FALSE;
int err = 0;
if (type == ZFS_TYPE_POOL) {
proptype = zpool_prop_get_type(prop);
propname = zpool_prop_to_name(prop);
} else {
proptype = zfs_prop_get_type(prop);
propname = zfs_prop_to_name(prop);
}
/*
* Convert any properties to the internal DSL value types.
*/
*svalp = NULL;
*ivalp = 0;
switch (proptype) {
case PROP_TYPE_STRING:
if (datatype != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), nvpair_name(elem));
goto error;
}
err = nvpair_value_string(elem, svalp);
if (err != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is invalid"), nvpair_name(elem));
goto error;
}
if (strlen(*svalp) >= ZFS_MAXPROPLEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is too long"), nvpair_name(elem));
goto error;
}
break;
case PROP_TYPE_NUMBER:
if (datatype == DATA_TYPE_STRING) {
(void) nvpair_value_string(elem, &value);
if (strcmp(value, "none") == 0) {
isnone = B_TRUE;
} else if (strcmp(value, "auto") == 0) {
isauto = B_TRUE;
} else if (zfs_nicestrtonum(hdl, value, ivalp) != 0) {
goto error;
}
} else if (datatype == DATA_TYPE_UINT64) {
(void) nvpair_value_uint64(elem, ivalp);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a number"), nvpair_name(elem));
goto error;
}
/*
* Quota special: force 'none' and don't allow 0.
*/
if ((type & ZFS_TYPE_DATASET) && *ivalp == 0 && !isnone &&
(prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable quota/refquota"));
goto error;
}
/*
* Special handling for "*_limit=none". In this case it's not
* 0 but UINT64_MAX.
*/
if ((type & ZFS_TYPE_DATASET) && isnone &&
(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT)) {
*ivalp = UINT64_MAX;
}
/*
* Special handling for setting 'refreservation' to 'auto'. Use
* UINT64_MAX to tell the caller to use zfs_fix_auto_resv().
* 'auto' is only allowed on volumes.
*/
if (isauto) {
switch (prop) {
case ZFS_PROP_REFRESERVATION:
if ((type & ZFS_TYPE_VOLUME) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s=auto' only allowed on "
"volumes"), nvpair_name(elem));
goto error;
}
*ivalp = UINT64_MAX;
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'auto' is invalid value for '%s'"),
nvpair_name(elem));
goto error;
}
}
break;
case PROP_TYPE_INDEX:
if (datatype != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), nvpair_name(elem));
goto error;
}
(void) nvpair_value_string(elem, &value);
if (zprop_string_to_index(prop, value, ivalp, type) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be one of '%s'"), propname,
zprop_values(prop, type));
goto error;
}
break;
default:
abort();
}
/*
* Add the result to our return set of properties.
*/
if (*svalp != NULL) {
if (nvlist_add_string(ret, propname, *svalp) != 0) {
(void) no_memory(hdl);
return (-1);
}
} else {
if (nvlist_add_uint64(ret, propname, *ivalp) != 0) {
(void) no_memory(hdl);
return (-1);
}
}
return (0);
error:
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
return (-1);
}
static int
addlist(libzfs_handle_t *hdl, char *propname, zprop_list_t **listp,
zfs_type_t type)
{
int prop;
zprop_list_t *entry;
prop = zprop_name_to_prop(propname, type);
if (prop != ZPROP_INVAL && !zprop_valid_for_type(prop, type, B_FALSE))
prop = ZPROP_INVAL;
/*
* When no property table entry can be found, return failure if
* this is a pool property or if this isn't a user-defined
* dataset property.
*/
if (prop == ZPROP_INVAL && ((type == ZFS_TYPE_POOL &&
!zpool_prop_feature(propname) &&
!zpool_prop_unsupported(propname)) ||
(type == ZFS_TYPE_DATASET && !zfs_prop_user(propname) &&
!zfs_prop_userquota(propname) && !zfs_prop_written(propname)))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
return (zfs_error(hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "bad property list")));
}
if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
return (-1);
entry->pl_prop = prop;
if (prop == ZPROP_INVAL) {
if ((entry->pl_user_prop = zfs_strdup(hdl, propname)) ==
NULL) {
free(entry);
return (-1);
}
entry->pl_width = strlen(propname);
} else {
entry->pl_width = zprop_width(prop, &entry->pl_fixed,
type);
}
*listp = entry;
return (0);
}
/*
* Given a comma-separated list of properties, construct a property list
* containing both user-defined and native properties. This function will
* return a NULL list if 'all' is specified, which can later be expanded
* by zprop_expand_list().
*/
int
zprop_get_list(libzfs_handle_t *hdl, char *props, zprop_list_t **listp,
zfs_type_t type)
{
*listp = NULL;
/*
* If 'all' is specified, return a NULL list.
*/
if (strcmp(props, "all") == 0)
return (0);
/*
* If no props were specified, return an error.
*/
if (props[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no properties specified"));
return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN,
"bad property list")));
}
/*
* It would be nice to use getsubopt() here, but the inclusion of column
* aliases makes this more effort than it's worth.
*/
while (*props != '\0') {
size_t len;
char *p;
char c;
if ((p = strchr(props, ',')) == NULL) {
len = strlen(props);
p = props + len;
} else {
len = p - props;
}
/*
* Check for empty options.
*/
if (len == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty property name"));
return (zfs_error(hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "bad property list")));
}
/*
* Check all regular property names.
*/
c = props[len];
props[len] = '\0';
if (strcmp(props, "space") == 0) {
static char *spaceprops[] = {
"name", "avail", "used", "usedbysnapshots",
"usedbydataset", "usedbyrefreservation",
"usedbychildren", NULL
};
int i;
for (i = 0; spaceprops[i]; i++) {
if (addlist(hdl, spaceprops[i], listp, type))
return (-1);
listp = &(*listp)->pl_next;
}
} else {
if (addlist(hdl, props, listp, type))
return (-1);
listp = &(*listp)->pl_next;
}
props = p;
if (c == ',')
props++;
}
return (0);
}
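/*
 * Hedged usage sketch (standalone, assumed caller code): building a property
 * list from a comma-separated string and releasing it afterwards. The
 * property string and function name below are hypothetical.
 */
#include <libzfs.h>
static int
example_build_proplist(libzfs_handle_t *hdl)
{
	zprop_list_t *pl = NULL;
	char props[] = "name,used,avail";	/* mutated in place, so writable */
	/* Passing "all" would leave pl NULL for zprop_expand_list() below. */
	if (zprop_get_list(hdl, props, &pl, ZFS_TYPE_FILESYSTEM) != 0)
		return (-1);
	/* ... iterate pl, looking at pl_prop / pl_user_prop / pl_width ... */
	zprop_free_list(pl);
	return (0);
}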
void
zprop_free_list(zprop_list_t *pl)
{
zprop_list_t *next;
while (pl != NULL) {
next = pl->pl_next;
free(pl->pl_user_prop);
free(pl);
pl = next;
}
}
typedef struct expand_data {
zprop_list_t **last;
libzfs_handle_t *hdl;
zfs_type_t type;
} expand_data_t;
static int
zprop_expand_list_cb(int prop, void *cb)
{
zprop_list_t *entry;
expand_data_t *edp = cb;
if ((entry = zfs_alloc(edp->hdl, sizeof (zprop_list_t))) == NULL)
return (ZPROP_INVAL);
entry->pl_prop = prop;
entry->pl_width = zprop_width(prop, &entry->pl_fixed, edp->type);
entry->pl_all = B_TRUE;
*(edp->last) = entry;
edp->last = &entry->pl_next;
return (ZPROP_CONT);
}
int
zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, zfs_type_t type)
{
zprop_list_t *entry;
zprop_list_t **last;
expand_data_t exp;
if (*plp == NULL) {
/*
* If this is the very first time we've been called for an 'all'
* specification, expand the list to include all native
* properties.
*/
last = plp;
exp.last = last;
exp.hdl = hdl;
exp.type = type;
if (zprop_iter_common(zprop_expand_list_cb, &exp, B_FALSE,
B_FALSE, type) == ZPROP_INVAL)
return (-1);
/*
* Add 'name' to the beginning of the list, which is handled
* specially.
*/
if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
return (-1);
entry->pl_prop = (type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
ZFS_PROP_NAME;
entry->pl_width = zprop_width(entry->pl_prop,
&entry->pl_fixed, type);
entry->pl_all = B_TRUE;
entry->pl_next = *plp;
*plp = entry;
}
return (0);
}
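/*
 * Hedged sketch (standalone, assumed caller code; assumes <libzfs.h>): when
 * "all" was requested, the list starts out NULL and zprop_expand_list()
 * above fills in every native property, prepending 'name'. The function
 * name is hypothetical.
 */
static int
example_expand_all(libzfs_handle_t *hdl)
{
	zprop_list_t *pl = NULL;	/* as returned by zprop_get_list("all") */
	if (zprop_expand_list(hdl, &pl, ZFS_TYPE_POOL) != 0)
		return (-1);
	/* pl now starts with ZPOOL_PROP_NAME, followed by all native props. */
	zprop_free_list(pl);
	return (0);
}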
int
zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered,
zfs_type_t type)
{
return (zprop_iter_common(func, cb, show_all, ordered, type));
}
/*
* Fill given version buffer with zfs userland version
*/
void
zfs_version_userland(char *version, int len)
{
(void) strlcpy(version, ZFS_META_ALIAS, len);
}
/*
* Prints both zfs userland and kernel versions
* Returns 0 on success, and -1 on error (with errno set)
*/
int
zfs_version_print(void)
{
char zver_userland[128];
char zver_kernel[128];
zfs_version_userland(zver_userland, sizeof (zver_userland));
(void) printf("%s\n", zver_userland);
if (zfs_version_kernel(zver_kernel, sizeof (zver_kernel)) == -1) {
fprintf(stderr, "zfs_version_kernel() failed: %s\n",
strerror(errno));
return (-1);
}
(void) printf("zfs-kmod-%s\n", zver_kernel);
return (0);
}
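/*
 * Hedged usage sketch (standalone, hypothetical main()): this is roughly
 * what `zfs version` prints. Assumes <libzfs.h> for the declaration.
 */
#include <stdlib.h>
int
main(void)
{
	/* Prints the userland version, then "zfs-kmod-<kernel version>". */
	return (zfs_version_print() == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}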
/*
* Return 1 if the user requested ANSI color output, and our terminal supports
* it. Return 0 for no color.
*/
static int
use_color(void)
{
static int use_color = -1;
char *term;
/*
* Optimization:
*
* For each zpool invocation, we do a single check to see if we should
* be using color or not, and cache that value for the lifetime of the
* zpool command. That makes it cheap to call use_color() when
* we're printing with color. We assume that the settings are not going
* to change during the invocation of a zpool command (the user isn't
* going to change the ZFS_COLOR value while zpool is running, for
* example).
*/
if (use_color != -1) {
/*
* We've already figured out if we should be using color or
* not. Return the cached value.
*/
return (use_color);
}
term = getenv("TERM");
/*
* The user sets the ZFS_COLOR env var to enable zpool ANSI color
* output. However, if NO_COLOR is set (https://no-color.org/) then
* don't use it. Also, don't use color if the terminal doesn't
* support it.
*/
if (libzfs_envvar_is_set("ZFS_COLOR") &&
!libzfs_envvar_is_set("NO_COLOR") &&
isatty(STDOUT_FILENO) && term && strcmp("dumb", term) != 0 &&
strcmp("unknown", term) != 0) {
/* Color supported */
use_color = 1;
} else {
use_color = 0;
}
return (use_color);
}
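/*
 * Hedged illustration (standalone sketch, not the library code): the checks
 * in use_color() above reduce to approximately the following environment
 * tests. The helper name is hypothetical and libzfs_envvar_is_set() is
 * approximated with plain getenv().
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
static int
example_color_wanted(void)
{
	const char *term = getenv("TERM");
	return (getenv("ZFS_COLOR") != NULL &&	/* user opted in */
	    getenv("NO_COLOR") == NULL &&	/* https://no-color.org/ */
	    isatty(STDOUT_FILENO) &&		/* stdout is a terminal */
	    term != NULL && strcmp(term, "dumb") != 0 &&
	    strcmp(term, "unknown") != 0);
}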
/*
* color_start() and color_end() are used for when you want to colorize a block
* of text. For example:
*
* color_start(ANSI_RED_FG)
* printf("hello");
* printf("world");
* color_end();
*/
void
color_start(char *color)
{
if (use_color())
printf("%s", color);
}
void
color_end(void)
{
if (use_color())
printf(ANSI_RESET);
}
/* printf() with a color. If color is NULL, then do a normal printf. */
int
printf_color(char *color, char *format, ...)
{
va_list aptr;
int rc;
if (color)
color_start(color);
va_start(aptr, format);
rc = vprintf(format, aptr);
va_end(aptr);
if (color)
color_end();
return (rc);
}
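/*
 * Hedged usage sketch (assumed caller code in this library): both forms of
 * colorized output using the helpers above. ANSI_RED_FG is taken from the
 * color_start() example comment above; <stdio.h> is assumed for printf().
 */
static void
example_colored_output(void)
{
	/* One-shot: color a single formatted string, reset afterwards. */
	(void) printf_color(ANSI_RED_FG, "pool state: %s\n", "DEGRADED");
	/* Block form: everything printed between start and end is colored. */
	color_start(ANSI_RED_FG);
	(void) printf("errors: %d\n", 3);
	color_end();
}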
diff --git a/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi b/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
index c54a994f79bc..1b018dd063e0 100644
--- a/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
+++ b/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
@@ -1,4522 +1,3685 @@
-<abi-corpus path='libzfs_core.so' architecture='elf-amd-x86_64' soname='libzfs_core.so.3'>
+<abi-corpus architecture='elf-amd-x86_64' soname='libzfs_core.so.3'>
<elf-needed>
- <dependency name='libatomic.so.1'/>
<dependency name='libuuid.so.1'/>
<dependency name='libz.so.1'/>
<dependency name='librt.so.1'/>
<dependency name='libm.so.6'/>
<dependency name='libblkid.so.1'/>
<dependency name='libudev.so.1'/>
<dependency name='libnvpair.so.3'/>
<dependency name='libpthread.so.0'/>
<dependency name='libc.so.6'/>
<dependency name='ld-linux-x86-64.so.2'/>
</elf-needed>
<elf-function-symbols>
+ <elf-symbol name='_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='_sol_getmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_clear_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_set_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy_nodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert_here' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_nearest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_swap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_gt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_lt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_alloc_and_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_alloc_and_read' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_err_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_rescan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_use_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_write' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_system_hostid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getexecname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getextmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getmntany' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getzoneid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='is_mpath_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assertf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_core_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_core_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_after' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_before' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_active' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_replace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_move_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_bookmark' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_change_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_channel_program' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_channel_program_nosync' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_clone' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_destroy_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_destroy_snaps' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_bookmark_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_holds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_hold' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_initialize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_load_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_pool_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_pool_checkpoint_discard' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_promote' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive_resumable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive_with_cmdprops' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive_with_header' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_redact' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_reopen' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_rollback' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_rollback_to' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send_redacted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send_resume_redacted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send_space' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send_space_resume_redacted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_set_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_snaprange_space' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_snapshot' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_sync' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_trim' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_unload_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_wait_fs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_wait_tag' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_consumer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_enter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_exit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_producer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mkdirp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='print_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spl_pagesize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcpy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_abandon' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_dispatch' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_member' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_suspend' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_suspended' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='update_vdev_config_dev_strs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_append_partition' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_basename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_flush' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_is_dm' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_is_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_device_get_devid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_device_get_physical' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dirnamelen' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_enclosure_sysfs_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_underlying_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_ioctl_fd' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_isnumber' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicebytes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicenum' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicenum_format' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_niceraw' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicetime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_resolve_shortname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strcmp_pathname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strip_partition' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strip_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_default_search_paths' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_dump_ddt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_config' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_history_unpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_label_disk_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_read_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_search_import' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='efi_debug' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assert_ok' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
- <abi-instr version='1.0' address-size='64' path='libzfs_core.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzfs_core' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='libzfs_core.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs_core' language='LANG_C99'>
<type-decl name='int' size-in-bits='32' id='type-id-1'/>
- <function-decl name='libzfs_core_init' mangled-name='libzfs_core_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_core_init'>
- <return type-id='type-id-1'/>
- </function-decl>
- <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='type-id-2'>
- <data-member access='private'>
- <var-decl name='__data' type-id='type-id-3' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__size' type-id='type-id-4' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__align' type-id='type-id-5' visibility='default'/>
- </data-member>
- </union-decl>
- <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-3'>
+ <type-decl name='char' size-in-bits='8' id='type-id-2'/>
+ <qualified-type-def type-id='type-id-2' const='yes' id='type-id-3'/>
+ <pointer-type-def type-id='type-id-3' size-in-bits='64' id='type-id-4'/>
+ <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-5'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__lock' type-id='type-id-1' visibility='default'/>
+ <var-decl name='nvl_version' type-id='type-id-6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='__count' type-id='type-id-6' visibility='default'/>
+ <var-decl name='nvl_nvflag' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__owner' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__nusers' type-id='type-id-6' visibility='default'/>
+ <var-decl name='nvl_priv' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='__kind' type-id='type-id-1' visibility='default'/>
+ <var-decl name='nvl_flag' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='__spins' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='176'>
- <var-decl name='__elision' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='__list' type-id='type-id-8' visibility='default'/>
+ <var-decl name='nvl_pad' type-id='type-id-6' visibility='default'/>
</data-member>
</class-decl>
- <type-decl name='unsigned int' size-in-bits='32' id='type-id-6'/>
- <type-decl name='short int' size-in-bits='16' id='type-id-7'/>
- <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-9'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__prev' type-id='type-id-10' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__next' type-id='type-id-10' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-9' size-in-bits='64' id='type-id-10'/>
- <typedef-decl name='__pthread_list_t' type-id='type-id-9' id='type-id-8'/>
- <type-decl name='char' size-in-bits='8' id='type-id-11'/>
- <type-decl name='__ARRAY_SIZE_TYPE__' size-in-bits='64' id='type-id-12'/>
-
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='320' id='type-id-4'>
- <subrange length='40' type-id='type-id-12' id='type-id-13'/>
-
- </array-type-def>
- <type-decl name='long int' size-in-bits='64' id='type-id-5'/>
- <pointer-type-def type-id='type-id-2' size-in-bits='64' id='type-id-14'/>
- <function-decl name='pthread_mutex_lock' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-14'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-11' const='yes' id='type-id-15'/>
+ <typedef-decl name='__int32_t' type-id='type-id-1' id='type-id-9'/>
+ <typedef-decl name='int32_t' type-id='type-id-9' id='type-id-6'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='type-id-10'/>
+ <typedef-decl name='__uint32_t' type-id='type-id-10' id='type-id-11'/>
+ <typedef-decl name='uint32_t' type-id='type-id-11' id='type-id-7'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='type-id-12'/>
+ <typedef-decl name='__uint64_t' type-id='type-id-12' id='type-id-13'/>
+ <typedef-decl name='uint64_t' type-id='type-id-13' id='type-id-8'/>
+ <typedef-decl name='nvlist_t' type-id='type-id-5' id='type-id-14'/>
+ <pointer-type-def type-id='type-id-14' size-in-bits='64' id='type-id-15'/>
<pointer-type-def type-id='type-id-15' size-in-bits='64' id='type-id-16'/>
- <function-decl name='open' mangled-name='open64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-1'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_mutex_unlock' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-14'/>
+ <function-decl name='lzc_get_bootenv' mangled-name='lzc_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bootenv'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-16' name='outnvl'/>
<return type-id='type-id-1'/>
</function-decl>
- <type-decl name='void' id='type-id-17'/>
- <function-decl name='libzfs_core_fini' mangled-name='libzfs_core_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_core_fini'>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='close' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
+ <qualified-type-def type-id='type-id-14' const='yes' id='type-id-17'/>
+ <pointer-type-def type-id='type-id-17' size-in-bits='64' id='type-id-18'/>
+ <function-decl name='lzc_set_bootenv' mangled-name='lzc_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_set_bootenv'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-18' name='env'/>
<return type-id='type-id-1'/>
</function-decl>
- <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-18'/>
- <enum-decl name='lzc_dataset_type' id='type-id-19'>
- <underlying-type type-id='type-id-18'/>
- <enumerator name='LZC_DATSET_TYPE_ZFS' value='2'/>
- <enumerator name='LZC_DATSET_TYPE_ZVOL' value='3'/>
+ <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-19'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-20'>
+ <underlying-type type-id='type-id-19'/>
+ <enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
+ <enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
</enum-decl>
- <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-20'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvl_version' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='nvl_nvflag' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvl_priv' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='nvl_flag' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='nvl_pad' type-id='type-id-21' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__int32_t' type-id='type-id-1' id='type-id-24'/>
- <typedef-decl name='int32_t' type-id='type-id-24' id='type-id-21'/>
- <typedef-decl name='__uint32_t' type-id='type-id-6' id='type-id-25'/>
- <typedef-decl name='uint32_t' type-id='type-id-25' id='type-id-22'/>
- <type-decl name='unsigned long int' size-in-bits='64' id='type-id-26'/>
- <typedef-decl name='__uint64_t' type-id='type-id-26' id='type-id-27'/>
- <typedef-decl name='uint64_t' type-id='type-id-27' id='type-id-23'/>
- <typedef-decl name='nvlist_t' type-id='type-id-20' id='type-id-28'/>
- <pointer-type-def type-id='type-id-28' size-in-bits='64' id='type-id-29'/>
- <type-decl name='unsigned char' size-in-bits='8' id='type-id-30'/>
- <typedef-decl name='__uint8_t' type-id='type-id-30' id='type-id-31'/>
- <typedef-decl name='uint8_t' type-id='type-id-31' id='type-id-32'/>
- <pointer-type-def type-id='type-id-32' size-in-bits='64' id='type-id-33'/>
- <typedef-decl name='uint_t' type-id='type-id-6' id='type-id-34'/>
- <function-decl name='lzc_create' mangled-name='lzc_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_create'>
- <parameter type-id='type-id-16' name='fsname'/>
- <parameter type-id='type-id-19' name='type'/>
- <parameter type-id='type-id-29' name='props'/>
- <parameter type-id='type-id-33' name='wkeydata'/>
- <parameter type-id='type-id-34' name='wkeylen'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-20' size-in-bits='64' id='type-id-35'/>
- <function-decl name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-35'/>
- </function-decl>
- <function-decl name='fnvlist_add_int32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <pointer-type-def type-id='type-id-30' size-in-bits='64' id='type-id-36'/>
- <function-decl name='fnvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-16'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <pointer-type-def type-id='type-id-11' size-in-bits='64' id='type-id-37'/>
- <function-decl name='strlcpy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-26'/>
- </function-decl>
- <pointer-type-def type-id='type-id-26' size-in-bits='64' id='type-id-38'/>
- <function-decl name='fnvlist_pack' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-38'/>
- <return type-id='type-id-37'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-26'/>
- </function-decl>
- <class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='type-id-39'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zc_name' type-id='type-id-40' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32768'>
- <var-decl name='zc_nvlist_src' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32832'>
- <var-decl name='zc_nvlist_src_size' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32896'>
- <var-decl name='zc_nvlist_dst' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32960'>
- <var-decl name='zc_nvlist_dst_size' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33024'>
- <var-decl name='zc_nvlist_dst_filled' type-id='type-id-41' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33056'>
- <var-decl name='zc_pad2' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33088'>
- <var-decl name='zc_history' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33152'>
- <var-decl name='zc_value' type-id='type-id-42' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='98688'>
- <var-decl name='zc_string' type-id='type-id-43' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100736'>
- <var-decl name='zc_guid' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100800'>
- <var-decl name='zc_nvlist_conf' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100864'>
- <var-decl name='zc_nvlist_conf_size' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100928'>
- <var-decl name='zc_cookie' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100992'>
- <var-decl name='zc_objset_type' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101056'>
- <var-decl name='zc_perm_action' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101120'>
- <var-decl name='zc_history_len' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101184'>
- <var-decl name='zc_history_offset' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101248'>
- <var-decl name='zc_obj' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101312'>
- <var-decl name='zc_iflags' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101376'>
- <var-decl name='zc_share' type-id='type-id-44' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101632'>
- <var-decl name='zc_objset_stats' type-id='type-id-45' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='103936'>
- <var-decl name='zc_begin_record' type-id='type-id-46' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='106368'>
- <var-decl name='zc_inject_record' type-id='type-id-47' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109184'>
- <var-decl name='zc_defer_destroy' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109216'>
- <var-decl name='zc_flags' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109248'>
- <var-decl name='zc_action_handle' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109312'>
- <var-decl name='zc_cleanup_fd' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109344'>
- <var-decl name='zc_simple' type-id='type-id-32' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109352'>
- <var-decl name='zc_pad' type-id='type-id-48' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109376'>
- <var-decl name='zc_sendobj' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109440'>
- <var-decl name='zc_fromobj' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109504'>
- <var-decl name='zc_createtxg' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109568'>
- <var-decl name='zc_stat' type-id='type-id-49' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109888'>
- <var-decl name='zc_zoneid' type-id='type-id-23' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='32768' id='type-id-40'>
- <subrange length='4096' type-id='type-id-12' id='type-id-50'/>
-
- </array-type-def>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-51'>
- <underlying-type type-id='type-id-18'/>
+ <typedef-decl name='zfs_wait_activity_t' type-id='type-id-20' id='type-id-21'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-22'>
+ <underlying-type type-id='type-id-19'/>
<enumerator name='B_FALSE' value='0'/>
<enumerator name='B_TRUE' value='1'/>
</enum-decl>
- <typedef-decl name='boolean_t' type-id='type-id-51' id='type-id-41'/>
-
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='65536' id='type-id-42'>
- <subrange length='8192' type-id='type-id-12' id='type-id-52'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='2048' id='type-id-43'>
- <subrange length='256' type-id='type-id-12' id='type-id-53'/>
-
- </array-type-def>
- <class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-54'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='z_exportdata' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='z_sharedata' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='z_sharetype' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='z_sharemax' type-id='type-id-23' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='zfs_share_t' type-id='type-id-54' id='type-id-44'/>
- <class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='type-id-55'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='dds_num_clones' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='dds_creation_txg' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='dds_guid' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='dds_type' type-id='type-id-56' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='dds_is_snapshot' type-id='type-id-32' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='232'>
- <var-decl name='dds_inconsistent' type-id='type-id-32' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='240'>
- <var-decl name='dds_redacted' type-id='type-id-32' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='248'>
- <var-decl name='dds_origin' type-id='type-id-43' visibility='default'/>
- </data-member>
- </class-decl>
- <enum-decl name='dmu_objset_type' id='type-id-57'>
- <underlying-type type-id='type-id-18'/>
- <enumerator name='DMU_OST_NONE' value='0'/>
- <enumerator name='DMU_OST_META' value='1'/>
- <enumerator name='DMU_OST_ZFS' value='2'/>
- <enumerator name='DMU_OST_ZVOL' value='3'/>
- <enumerator name='DMU_OST_OTHER' value='4'/>
- <enumerator name='DMU_OST_ANY' value='5'/>
- <enumerator name='DMU_OST_NUMTYPES' value='6'/>
- </enum-decl>
- <typedef-decl name='dmu_objset_type_t' type-id='type-id-57' id='type-id-56'/>
- <typedef-decl name='dmu_objset_stats_t' type-id='type-id-55' id='type-id-45'/>
- <class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='type-id-46'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_magic' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_versioninfo' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_creation_time' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_type' type-id='type-id-56' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='drr_flags' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_fromguid' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_toname' type-id='type-id-43' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='type-id-58'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zi_objset' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zi_object' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zi_start' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='zi_end' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='zi_guid' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='zi_level' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='zi_error' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='zi_type' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='zi_freq' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='zi_failfast' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='zi_func' type-id='type-id-43' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2560'>
- <var-decl name='zi_iotype' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2592'>
- <var-decl name='zi_duration' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2624'>
- <var-decl name='zi_timer' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2688'>
- <var-decl name='zi_nlanes' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2752'>
- <var-decl name='zi_cmd' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2784'>
- <var-decl name='zi_dvas' type-id='type-id-22' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='zinject_record_t' type-id='type-id-58' id='type-id-47'/>
-
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='24' id='type-id-48'>
- <subrange length='3' type-id='type-id-12' id='type-id-59'/>
-
- </array-type-def>
- <class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-60'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zs_gen' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zs_mode' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zs_links' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='zs_ctime' type-id='type-id-61' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='128' id='type-id-61'>
- <subrange length='2' type-id='type-id-12' id='type-id-62'/>
-
- </array-type-def>
- <typedef-decl name='zfs_stat_t' type-id='type-id-60' id='type-id-49'/>
- <pointer-type-def type-id='type-id-39' size-in-bits='64' id='type-id-63'/>
- <function-decl name='zfs_ioctl_fd' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-63'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-35'/>
- </function-decl>
- <function-decl name='fnvlist_pack_free' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='lzc_clone' mangled-name='lzc_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_clone'>
- <parameter type-id='type-id-16' name='fsname'/>
- <parameter type-id='type-id-16' name='origin'/>
- <parameter type-id='type-id-29' name='props'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='lzc_promote' mangled-name='lzc_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_promote'>
- <parameter type-id='type-id-16' name='fsname'/>
- <parameter type-id='type-id-37' name='snapnamebuf'/>
- <parameter type-id='type-id-1' name='snapnamelen'/>
+ <typedef-decl name='boolean_t' type-id='type-id-22' id='type-id-23'/>
+ <pointer-type-def type-id='type-id-23' size-in-bits='64' id='type-id-24'/>
+ <function-decl name='lzc_wait_fs' mangled-name='lzc_wait_fs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait_fs'>
+ <parameter type-id='type-id-4' name='fs'/>
+ <parameter type-id='type-id-21' name='activity'/>
+ <parameter type-id='type-id-24' name='waited'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_rename' mangled-name='lzc_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rename'>
- <parameter type-id='type-id-16' name='source'/>
- <parameter type-id='type-id-16' name='target'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-25'>
+ <underlying-type type-id='type-id-19'/>
+ <enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
+ <enumerator name='ZPOOL_WAIT_FREE' value='1'/>
+ <enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
+ <enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
+ <enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
+ <enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
+ <enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
+ <enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
+ <enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
+ </enum-decl>
+ <typedef-decl name='zpool_wait_activity_t' type-id='type-id-25' id='type-id-26'/>
+ <function-decl name='lzc_wait_tag' mangled-name='lzc_wait_tag' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait_tag'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-26' name='activity'/>
+ <parameter type-id='type-id-8' name='tag'/>
+ <parameter type-id='type-id-24' name='waited'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_destroy' mangled-name='lzc_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy'>
- <parameter type-id='type-id-16' name='fsname'/>
+ <function-decl name='lzc_wait' mangled-name='lzc_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-26' name='activity'/>
+ <parameter type-id='type-id-24' name='waited'/>
<return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-29' size-in-bits='64' id='type-id-64'/>
- <function-decl name='lzc_snapshot' mangled-name='lzc_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_snapshot'>
- <parameter type-id='type-id-29' name='snaps'/>
- <parameter type-id='type-id-29' name='props'/>
- <parameter type-id='type-id-64' name='errlist'/>
+ <function-decl name='lzc_redact' mangled-name='lzc_redact' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_redact'>
+ <parameter type-id='type-id-4' name='snapshot'/>
+ <parameter type-id='type-id-4' name='bookname'/>
+ <parameter type-id='type-id-15' name='snapnv'/>
<return type-id='type-id-1'/>
</function-decl>
- <class-decl name='nvpair' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-65'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvp_size' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='nvp_name_sz' type-id='type-id-66' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='48'>
- <var-decl name='nvp_reserve' type-id='type-id-66' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvp_value_elem' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='nvp_type' type-id='type-id-67' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__int16_t' type-id='type-id-7' id='type-id-68'/>
- <typedef-decl name='int16_t' type-id='type-id-68' id='type-id-66'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-69'>
- <underlying-type type-id='type-id-18'/>
- <enumerator name='DATA_TYPE_DONTCARE' value='-1'/>
- <enumerator name='DATA_TYPE_UNKNOWN' value='0'/>
- <enumerator name='DATA_TYPE_BOOLEAN' value='1'/>
- <enumerator name='DATA_TYPE_BYTE' value='2'/>
- <enumerator name='DATA_TYPE_INT16' value='3'/>
- <enumerator name='DATA_TYPE_UINT16' value='4'/>
- <enumerator name='DATA_TYPE_INT32' value='5'/>
- <enumerator name='DATA_TYPE_UINT32' value='6'/>
- <enumerator name='DATA_TYPE_INT64' value='7'/>
- <enumerator name='DATA_TYPE_UINT64' value='8'/>
- <enumerator name='DATA_TYPE_STRING' value='9'/>
- <enumerator name='DATA_TYPE_BYTE_ARRAY' value='10'/>
- <enumerator name='DATA_TYPE_INT16_ARRAY' value='11'/>
- <enumerator name='DATA_TYPE_UINT16_ARRAY' value='12'/>
- <enumerator name='DATA_TYPE_INT32_ARRAY' value='13'/>
- <enumerator name='DATA_TYPE_UINT32_ARRAY' value='14'/>
- <enumerator name='DATA_TYPE_INT64_ARRAY' value='15'/>
- <enumerator name='DATA_TYPE_UINT64_ARRAY' value='16'/>
- <enumerator name='DATA_TYPE_STRING_ARRAY' value='17'/>
- <enumerator name='DATA_TYPE_HRTIME' value='18'/>
- <enumerator name='DATA_TYPE_NVLIST' value='19'/>
- <enumerator name='DATA_TYPE_NVLIST_ARRAY' value='20'/>
- <enumerator name='DATA_TYPE_BOOLEAN_VALUE' value='21'/>
- <enumerator name='DATA_TYPE_INT8' value='22'/>
- <enumerator name='DATA_TYPE_UINT8' value='23'/>
- <enumerator name='DATA_TYPE_BOOLEAN_ARRAY' value='24'/>
- <enumerator name='DATA_TYPE_INT8_ARRAY' value='25'/>
- <enumerator name='DATA_TYPE_UINT8_ARRAY' value='26'/>
- <enumerator name='DATA_TYPE_DOUBLE' value='27'/>
+ <enum-decl name='pool_trim_func' id='type-id-27'>
+ <underlying-type type-id='type-id-19'/>
+ <enumerator name='POOL_TRIM_START' value='0'/>
+ <enumerator name='POOL_TRIM_CANCEL' value='1'/>
+ <enumerator name='POOL_TRIM_SUSPEND' value='2'/>
+ <enumerator name='POOL_TRIM_FUNCS' value='3'/>
</enum-decl>
- <typedef-decl name='data_type_t' type-id='type-id-69' id='type-id-67'/>
- <pointer-type-def type-id='type-id-65' size-in-bits='64' id='type-id-70'/>
- <function-decl name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-70'/>
- <return type-id='type-id-70'/>
- </function-decl>
- <function-decl name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-70'/>
- <return type-id='type-id-37'/>
- </function-decl>
- <function-decl name='lzc_destroy_snaps' mangled-name='lzc_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy_snaps'>
- <parameter type-id='type-id-29' name='snaps'/>
- <parameter type-id='type-id-41' name='defer'/>
- <parameter type-id='type-id-64' name='errlist'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <pointer-type-def type-id='type-id-23' size-in-bits='64' id='type-id-71'/>
- <function-decl name='lzc_snaprange_space' mangled-name='lzc_snaprange_space' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_snaprange_space'>
- <parameter type-id='type-id-16' name='firstsnap'/>
- <parameter type-id='type-id-16' name='lastsnap'/>
- <parameter type-id='type-id-71' name='usedp'/>
+ <typedef-decl name='pool_trim_func_t' type-id='type-id-27' id='type-id-28'/>
+ <function-decl name='lzc_trim' mangled-name='lzc_trim' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_trim'>
+ <parameter type-id='type-id-4' name='poolname'/>
+ <parameter type-id='type-id-28' name='cmd_type'/>
+ <parameter type-id='type-id-8' name='rate'/>
+ <parameter type-id='type-id-23' name='secure'/>
+ <parameter type-id='type-id-15' name='vdevs'/>
+ <parameter type-id='type-id-16' name='errlist'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='lzc_exists' mangled-name='lzc_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_exists'>
- <parameter type-id='type-id-16' name='dataset'/>
- <return type-id='type-id-41'/>
- </function-decl>
- <function-decl name='lzc_sync' mangled-name='lzc_sync' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_sync'>
- <parameter type-id='type-id-16' name='pool_name'/>
- <parameter type-id='type-id-29' name='innvl'/>
- <parameter type-id='type-id-64' name='outnvl'/>
+ <enum-decl name='pool_initialize_func' id='type-id-29'>
+ <underlying-type type-id='type-id-19'/>
+ <enumerator name='POOL_INITIALIZE_START' value='0'/>
+ <enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
+ <enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
+ <enumerator name='POOL_INITIALIZE_FUNCS' value='3'/>
+ </enum-decl>
+ <typedef-decl name='pool_initialize_func_t' type-id='type-id-29' id='type-id-30'/>
+ <function-decl name='lzc_initialize' mangled-name='lzc_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_initialize'>
+ <parameter type-id='type-id-4' name='poolname'/>
+ <parameter type-id='type-id-30' name='cmd_type'/>
+ <parameter type-id='type-id-15' name='vdevs'/>
+ <parameter type-id='type-id-16' name='errlist'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_hold' mangled-name='lzc_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_hold'>
- <parameter type-id='type-id-29' name='holds'/>
- <parameter type-id='type-id-1' name='cleanup_fd'/>
- <parameter type-id='type-id-64' name='errlist'/>
+ <function-decl name='lzc_reopen' mangled-name='lzc_reopen' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_reopen'>
+ <parameter type-id='type-id-4' name='pool_name'/>
+ <parameter type-id='type-id-23' name='scrub_restart'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_release' mangled-name='lzc_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_release'>
- <parameter type-id='type-id-29' name='holds'/>
- <parameter type-id='type-id-64' name='errlist'/>
+ <type-decl name='unsigned char' size-in-bits='8' id='type-id-31'/>
+ <typedef-decl name='__uint8_t' type-id='type-id-31' id='type-id-32'/>
+ <typedef-decl name='uint8_t' type-id='type-id-32' id='type-id-33'/>
+ <pointer-type-def type-id='type-id-33' size-in-bits='64' id='type-id-34'/>
+ <typedef-decl name='uint_t' type-id='type-id-10' id='type-id-35'/>
+ <function-decl name='lzc_change_key' mangled-name='lzc_change_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_change_key'>
+ <parameter type-id='type-id-4' name='fsname'/>
+ <parameter type-id='type-id-8' name='crypt_cmd'/>
+ <parameter type-id='type-id-15' name='props'/>
+ <parameter type-id='type-id-34' name='wkeydata'/>
+ <parameter type-id='type-id-35' name='wkeylen'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_get_holds' mangled-name='lzc_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_holds'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-64' name='holdsp'/>
+ <function-decl name='lzc_unload_key' mangled-name='lzc_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_unload_key'>
+ <parameter type-id='type-id-4' name='fsname'/>
<return type-id='type-id-1'/>
</function-decl>
- <enum-decl name='lzc_send_flags' id='type-id-72'>
- <underlying-type type-id='type-id-18'/>
- <enumerator name='LZC_SEND_FLAG_EMBED_DATA' value='1'/>
- <enumerator name='LZC_SEND_FLAG_LARGE_BLOCK' value='2'/>
- <enumerator name='LZC_SEND_FLAG_COMPRESS' value='4'/>
- <enumerator name='LZC_SEND_FLAG_RAW' value='8'/>
- <enumerator name='LZC_SEND_FLAG_SAVED' value='16'/>
- </enum-decl>
- <function-decl name='lzc_send' mangled-name='lzc_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-16' name='from'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-72' name='flags'/>
+ <function-decl name='lzc_load_key' mangled-name='lzc_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_load_key'>
+ <parameter type-id='type-id-4' name='fsname'/>
+ <parameter type-id='type-id-23' name='noop'/>
+ <parameter type-id='type-id-34' name='wkeydata'/>
+ <parameter type-id='type-id-35' name='wkeylen'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_send_resume_redacted' mangled-name='lzc_send_resume_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_resume_redacted'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-16' name='from'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-72' name='flags'/>
- <parameter type-id='type-id-23' name='resumeobj'/>
- <parameter type-id='type-id-23' name='resumeoff'/>
- <parameter type-id='type-id-16' name='redactbook'/>
+ <function-decl name='lzc_channel_program_nosync' mangled-name='lzc_channel_program_nosync' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_channel_program_nosync'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-4' name='program'/>
+ <parameter type-id='type-id-8' name='timeout'/>
+ <parameter type-id='type-id-8' name='memlimit'/>
+ <parameter type-id='type-id-15' name='argnvl'/>
+ <parameter type-id='type-id-16' name='outnvl'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='lzc_send_redacted' mangled-name='lzc_send_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_redacted'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-16' name='from'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-72' name='flags'/>
- <parameter type-id='type-id-16' name='redactbook'/>
+ <function-decl name='lzc_pool_checkpoint_discard' mangled-name='lzc_pool_checkpoint_discard' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_pool_checkpoint_discard'>
+ <parameter type-id='type-id-4' name='pool'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_send_resume' mangled-name='lzc_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_resume'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-16' name='from'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-72' name='flags'/>
- <parameter type-id='type-id-23' name='resumeobj'/>
- <parameter type-id='type-id-23' name='resumeoff'/>
+ <function-decl name='lzc_pool_checkpoint' mangled-name='lzc_pool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_pool_checkpoint'>
+ <parameter type-id='type-id-4' name='pool'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_send_space_resume_redacted' mangled-name='lzc_send_space_resume_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_space_resume_redacted'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-16' name='from'/>
- <parameter type-id='type-id-72' name='flags'/>
- <parameter type-id='type-id-23' name='resumeobj'/>
- <parameter type-id='type-id-23' name='resumeoff'/>
- <parameter type-id='type-id-23' name='resume_bytes'/>
- <parameter type-id='type-id-16' name='redactbook'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-71' name='spacep'/>
+ <function-decl name='lzc_channel_program' mangled-name='lzc_channel_program' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_channel_program'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-4' name='program'/>
+ <parameter type-id='type-id-8' name='timeout'/>
+ <parameter type-id='type-id-8' name='memlimit'/>
+ <parameter type-id='type-id-15' name='argnvl'/>
+ <parameter type-id='type-id-16' name='outnvl'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_send_space' mangled-name='lzc_send_space' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_space'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-16' name='from'/>
- <parameter type-id='type-id-72' name='flags'/>
- <parameter type-id='type-id-71' name='spacep'/>
+ <function-decl name='lzc_destroy_bookmarks' mangled-name='lzc_destroy_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy_bookmarks'>
+ <parameter type-id='type-id-15' name='bmarks'/>
+ <parameter type-id='type-id-16' name='errlist'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_receive' mangled-name='lzc_receive' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-29' name='props'/>
- <parameter type-id='type-id-16' name='origin'/>
- <parameter type-id='type-id-41' name='force'/>
- <parameter type-id='type-id-41' name='raw'/>
- <parameter type-id='type-id-1' name='fd'/>
+ <function-decl name='lzc_get_bookmark_props' mangled-name='lzc_get_bookmark_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bookmark_props'>
+ <parameter type-id='type-id-4' name='bookmark'/>
+ <parameter type-id='type-id-16' name='props'/>
<return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-17' size-in-bits='64' id='type-id-73'/>
- <function-decl name='read' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='fnvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-38'/>
+ <function-decl name='lzc_get_bookmarks' mangled-name='lzc_get_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bookmarks'>
+ <parameter type-id='type-id-4' name='fsname'/>
+ <parameter type-id='type-id-15' name='props'/>
+ <parameter type-id='type-id-16' name='bmarks'/>
<return type-id='type-id-1'/>
</function-decl>
- <pointer-type-def type-id='type-id-35' size-in-bits='64' id='type-id-74'/>
- <function-decl name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-74'/>
+ <function-decl name='lzc_bookmark' mangled-name='lzc_bookmark' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_bookmark'>
+ <parameter type-id='type-id-15' name='bmarks'/>
+ <parameter type-id='type-id-16' name='errlist'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-35'/>
- </function-decl>
- <function-decl name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-74'/>
- <parameter type-id='type-id-1'/>
+ <function-decl name='lzc_rollback_to' mangled-name='lzc_rollback_to' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rollback_to'>
+ <parameter type-id='type-id-4' name='fsname'/>
+ <parameter type-id='type-id-4' name='snapname'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_receive_resumable' mangled-name='lzc_receive_resumable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_resumable'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-29' name='props'/>
- <parameter type-id='type-id-16' name='origin'/>
- <parameter type-id='type-id-41' name='force'/>
- <parameter type-id='type-id-41' name='raw'/>
- <parameter type-id='type-id-1' name='fd'/>
+ <pointer-type-def type-id='type-id-2' size-in-bits='64' id='type-id-36'/>
+ <function-decl name='lzc_rollback' mangled-name='lzc_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rollback'>
+ <parameter type-id='type-id-4' name='fsname'/>
+ <parameter type-id='type-id-36' name='snapnamebuf'/>
+ <parameter type-id='type-id-1' name='snapnamelen'/>
<return type-id='type-id-1'/>
</function-decl>
- <class-decl name='dmu_replay_record' size-in-bits='2496' is-struct='yes' visibility='default' id='type-id-75'>
+ <class-decl name='dmu_replay_record' size-in-bits='2496' is-struct='yes' visibility='default' id='type-id-37'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_type' type-id='type-id-76' visibility='default'/>
+ <var-decl name='drr_type' type-id='type-id-38' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='drr_payloadlen' type-id='type-id-22' visibility='default'/>
+ <var-decl name='drr_payloadlen' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_u' type-id='type-id-77' visibility='default'/>
+ <var-decl name='drr_u' type-id='type-id-39' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-76'>
- <underlying-type type-id='type-id-18'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-38'>
+ <underlying-type type-id='type-id-19'/>
<enumerator name='DRR_BEGIN' value='0'/>
<enumerator name='DRR_OBJECT' value='1'/>
<enumerator name='DRR_FREEOBJECTS' value='2'/>
<enumerator name='DRR_WRITE' value='3'/>
<enumerator name='DRR_FREE' value='4'/>
<enumerator name='DRR_END' value='5'/>
<enumerator name='DRR_WRITE_BYREF' value='6'/>
<enumerator name='DRR_SPILL' value='7'/>
<enumerator name='DRR_WRITE_EMBEDDED' value='8'/>
<enumerator name='DRR_OBJECT_RANGE' value='9'/>
<enumerator name='DRR_REDACT' value='10'/>
<enumerator name='DRR_NUMTYPES' value='11'/>
</enum-decl>
- <union-decl name='__anonymous_union__' size-in-bits='2432' is-anonymous='yes' visibility='default' id='type-id-77'>
+ <union-decl name='__anonymous_union__' size-in-bits='2432' is-anonymous='yes' visibility='default' id='type-id-39'>
<data-member access='private'>
- <var-decl name='drr_begin' type-id='type-id-46' visibility='default'/>
+ <var-decl name='drr_begin' type-id='type-id-40' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_end' type-id='type-id-78' visibility='default'/>
+ <var-decl name='drr_end' type-id='type-id-41' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_object' type-id='type-id-79' visibility='default'/>
+ <var-decl name='drr_object' type-id='type-id-42' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_freeobjects' type-id='type-id-80' visibility='default'/>
+ <var-decl name='drr_freeobjects' type-id='type-id-43' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_write' type-id='type-id-81' visibility='default'/>
+ <var-decl name='drr_write' type-id='type-id-44' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_free' type-id='type-id-82' visibility='default'/>
+ <var-decl name='drr_free' type-id='type-id-45' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_write_byref' type-id='type-id-83' visibility='default'/>
+ <var-decl name='drr_write_byref' type-id='type-id-46' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_spill' type-id='type-id-84' visibility='default'/>
+ <var-decl name='drr_spill' type-id='type-id-47' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_write_embedded' type-id='type-id-85' visibility='default'/>
+ <var-decl name='drr_write_embedded' type-id='type-id-48' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_object_range' type-id='type-id-86' visibility='default'/>
+ <var-decl name='drr_object_range' type-id='type-id-49' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_redact' type-id='type-id-87' visibility='default'/>
+ <var-decl name='drr_redact' type-id='type-id-50' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='drr_checksum' type-id='type-id-88' visibility='default'/>
+ <var-decl name='drr_checksum' type-id='type-id-51' visibility='default'/>
</data-member>
</union-decl>
- <class-decl name='drr_end' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-78'>
+ <class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='type-id-40'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_checksum' type-id='type-id-89' visibility='default'/>
+ <var-decl name='drr_magic' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_versioninfo' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_creation_time' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_type' type-id='type-id-52' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='drr_flags' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='drr_fromguid' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='drr_toname' type-id='type-id-53' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-90'>
+ <enum-decl name='dmu_objset_type' id='type-id-54'>
+ <underlying-type type-id='type-id-19'/>
+ <enumerator name='DMU_OST_NONE' value='0'/>
+ <enumerator name='DMU_OST_META' value='1'/>
+ <enumerator name='DMU_OST_ZFS' value='2'/>
+ <enumerator name='DMU_OST_ZVOL' value='3'/>
+ <enumerator name='DMU_OST_OTHER' value='4'/>
+ <enumerator name='DMU_OST_ANY' value='5'/>
+ <enumerator name='DMU_OST_NUMTYPES' value='6'/>
+ </enum-decl>
+ <typedef-decl name='dmu_objset_type_t' type-id='type-id-54' id='type-id-52'/>
+
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='2048' id='type-id-53'>
+ <subrange length='256' type-id='type-id-12' id='type-id-55'/>
+
+ </array-type-def>
+ <class-decl name='drr_end' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-41'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zc_word' type-id='type-id-91' visibility='default'/>
+ <var-decl name='drr_checksum' type-id='type-id-56' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-57'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='zc_word' type-id='type-id-58' visibility='default'/>
</data-member>
</class-decl>
- <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='256' id='type-id-91'>
- <subrange length='4' type-id='type-id-12' id='type-id-92'/>
+ <array-type-def dimensions='1' type-id='type-id-8' size-in-bits='256' id='type-id-58'>
+ <subrange length='4' type-id='type-id-12' id='type-id-59'/>
</array-type-def>
- <typedef-decl name='zio_cksum_t' type-id='type-id-90' id='type-id-89'/>
- <class-decl name='drr_object' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-79'>
+ <typedef-decl name='zio_cksum_t' type-id='type-id-57' id='type-id-56'/>
+ <class-decl name='drr_object' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-42'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_type' type-id='type-id-93' visibility='default'/>
+ <var-decl name='drr_type' type-id='type-id-60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='drr_bonustype' type-id='type-id-93' visibility='default'/>
+ <var-decl name='drr_bonustype' type-id='type-id-60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_blksz' type-id='type-id-22' visibility='default'/>
+ <var-decl name='drr_blksz' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='drr_bonuslen' type-id='type-id-22' visibility='default'/>
+ <var-decl name='drr_bonuslen' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_checksumtype' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_checksumtype' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='200'>
- <var-decl name='drr_compress' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_compress' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='208'>
- <var-decl name='drr_dn_slots' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_dn_slots' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='216'>
- <var-decl name='drr_flags' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_flags' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='drr_raw_bonuslen' type-id='type-id-22' visibility='default'/>
+ <var-decl name='drr_raw_bonuslen' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_indblkshift' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_indblkshift' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='328'>
- <var-decl name='drr_nlevels' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_nlevels' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='336'>
- <var-decl name='drr_nblkptr' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_nblkptr' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='344'>
- <var-decl name='drr_pad' type-id='type-id-94' visibility='default'/>
+ <var-decl name='drr_pad' type-id='type-id-61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_maxblkid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_maxblkid' type-id='type-id-8' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='dmu_object_type' id='type-id-95'>
- <underlying-type type-id='type-id-18'/>
+ <enum-decl name='dmu_object_type' id='type-id-62'>
+ <underlying-type type-id='type-id-19'/>
<enumerator name='DMU_OT_NONE' value='0'/>
<enumerator name='DMU_OT_OBJECT_DIRECTORY' value='1'/>
<enumerator name='DMU_OT_OBJECT_ARRAY' value='2'/>
<enumerator name='DMU_OT_PACKED_NVLIST' value='3'/>
<enumerator name='DMU_OT_PACKED_NVLIST_SIZE' value='4'/>
<enumerator name='DMU_OT_BPOBJ' value='5'/>
<enumerator name='DMU_OT_BPOBJ_HDR' value='6'/>
<enumerator name='DMU_OT_SPACE_MAP_HEADER' value='7'/>
<enumerator name='DMU_OT_SPACE_MAP' value='8'/>
<enumerator name='DMU_OT_INTENT_LOG' value='9'/>
<enumerator name='DMU_OT_DNODE' value='10'/>
<enumerator name='DMU_OT_OBJSET' value='11'/>
<enumerator name='DMU_OT_DSL_DIR' value='12'/>
<enumerator name='DMU_OT_DSL_DIR_CHILD_MAP' value='13'/>
<enumerator name='DMU_OT_DSL_DS_SNAP_MAP' value='14'/>
<enumerator name='DMU_OT_DSL_PROPS' value='15'/>
<enumerator name='DMU_OT_DSL_DATASET' value='16'/>
<enumerator name='DMU_OT_ZNODE' value='17'/>
<enumerator name='DMU_OT_OLDACL' value='18'/>
<enumerator name='DMU_OT_PLAIN_FILE_CONTENTS' value='19'/>
<enumerator name='DMU_OT_DIRECTORY_CONTENTS' value='20'/>
<enumerator name='DMU_OT_MASTER_NODE' value='21'/>
<enumerator name='DMU_OT_UNLINKED_SET' value='22'/>
<enumerator name='DMU_OT_ZVOL' value='23'/>
<enumerator name='DMU_OT_ZVOL_PROP' value='24'/>
<enumerator name='DMU_OT_PLAIN_OTHER' value='25'/>
<enumerator name='DMU_OT_UINT64_OTHER' value='26'/>
<enumerator name='DMU_OT_ZAP_OTHER' value='27'/>
<enumerator name='DMU_OT_ERROR_LOG' value='28'/>
<enumerator name='DMU_OT_SPA_HISTORY' value='29'/>
<enumerator name='DMU_OT_SPA_HISTORY_OFFSETS' value='30'/>
<enumerator name='DMU_OT_POOL_PROPS' value='31'/>
<enumerator name='DMU_OT_DSL_PERMS' value='32'/>
<enumerator name='DMU_OT_ACL' value='33'/>
<enumerator name='DMU_OT_SYSACL' value='34'/>
<enumerator name='DMU_OT_FUID' value='35'/>
<enumerator name='DMU_OT_FUID_SIZE' value='36'/>
<enumerator name='DMU_OT_NEXT_CLONES' value='37'/>
<enumerator name='DMU_OT_SCAN_QUEUE' value='38'/>
<enumerator name='DMU_OT_USERGROUP_USED' value='39'/>
<enumerator name='DMU_OT_USERGROUP_QUOTA' value='40'/>
<enumerator name='DMU_OT_USERREFS' value='41'/>
<enumerator name='DMU_OT_DDT_ZAP' value='42'/>
<enumerator name='DMU_OT_DDT_STATS' value='43'/>
<enumerator name='DMU_OT_SA' value='44'/>
<enumerator name='DMU_OT_SA_MASTER_NODE' value='45'/>
<enumerator name='DMU_OT_SA_ATTR_REGISTRATION' value='46'/>
<enumerator name='DMU_OT_SA_ATTR_LAYOUTS' value='47'/>
<enumerator name='DMU_OT_SCAN_XLATE' value='48'/>
<enumerator name='DMU_OT_DEDUP' value='49'/>
<enumerator name='DMU_OT_DEADLIST' value='50'/>
<enumerator name='DMU_OT_DEADLIST_HDR' value='51'/>
<enumerator name='DMU_OT_DSL_CLONES' value='52'/>
<enumerator name='DMU_OT_BPOBJ_SUBOBJ' value='53'/>
<enumerator name='DMU_OT_NUMTYPES' value='54'/>
<enumerator name='DMU_OTN_UINT8_DATA' value='128'/>
<enumerator name='DMU_OTN_UINT8_METADATA' value='192'/>
<enumerator name='DMU_OTN_UINT16_DATA' value='129'/>
<enumerator name='DMU_OTN_UINT16_METADATA' value='193'/>
<enumerator name='DMU_OTN_UINT32_DATA' value='130'/>
<enumerator name='DMU_OTN_UINT32_METADATA' value='194'/>
<enumerator name='DMU_OTN_UINT64_DATA' value='131'/>
<enumerator name='DMU_OTN_UINT64_METADATA' value='195'/>
<enumerator name='DMU_OTN_ZAP_DATA' value='132'/>
<enumerator name='DMU_OTN_ZAP_METADATA' value='196'/>
<enumerator name='DMU_OTN_UINT8_ENC_DATA' value='160'/>
<enumerator name='DMU_OTN_UINT8_ENC_METADATA' value='224'/>
<enumerator name='DMU_OTN_UINT16_ENC_DATA' value='161'/>
<enumerator name='DMU_OTN_UINT16_ENC_METADATA' value='225'/>
<enumerator name='DMU_OTN_UINT32_ENC_DATA' value='162'/>
<enumerator name='DMU_OTN_UINT32_ENC_METADATA' value='226'/>
<enumerator name='DMU_OTN_UINT64_ENC_DATA' value='163'/>
<enumerator name='DMU_OTN_UINT64_ENC_METADATA' value='227'/>
<enumerator name='DMU_OTN_ZAP_ENC_DATA' value='164'/>
<enumerator name='DMU_OTN_ZAP_ENC_METADATA' value='228'/>
</enum-decl>
- <typedef-decl name='dmu_object_type_t' type-id='type-id-95' id='type-id-93'/>
+ <typedef-decl name='dmu_object_type_t' type-id='type-id-62' id='type-id-60'/>
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='40' id='type-id-94'>
- <subrange length='5' type-id='type-id-12' id='type-id-96'/>
+ <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='40' id='type-id-61'>
+ <subrange length='5' type-id='type-id-12' id='type-id-63'/>
</array-type-def>
- <class-decl name='drr_freeobjects' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-80'>
+ <class-decl name='drr_freeobjects' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-43'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_firstobj' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_firstobj' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_numobjs' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_numobjs' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='drr_write' size-in-bits='1088' is-struct='yes' visibility='default' id='type-id-81'>
+ <class-decl name='drr_write' size-in-bits='1088' is-struct='yes' visibility='default' id='type-id-44'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_type' type-id='type-id-93' visibility='default'/>
+ <var-decl name='drr_type' type-id='type-id-60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='drr_pad' type-id='type-id-22' visibility='default'/>
+ <var-decl name='drr_pad' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_offset' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_offset' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_logical_size' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_logical_size' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_checksumtype' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_checksumtype' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='328'>
- <var-decl name='drr_flags' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_flags' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='336'>
- <var-decl name='drr_compressiontype' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_compressiontype' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='344'>
- <var-decl name='drr_pad2' type-id='type-id-94' visibility='default'/>
+ <var-decl name='drr_pad2' type-id='type-id-61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_key' type-id='type-id-97' visibility='default'/>
+ <var-decl name='drr_key' type-id='type-id-64' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='drr_compressed_size' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_compressed_size' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='drr_salt' type-id='type-id-98' visibility='default'/>
+ <var-decl name='drr_salt' type-id='type-id-65' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='drr_iv' type-id='type-id-99' visibility='default'/>
+ <var-decl name='drr_iv' type-id='type-id-66' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='drr_mac' type-id='type-id-100' visibility='default'/>
+ <var-decl name='drr_mac' type-id='type-id-67' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='ddt_key' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-101'>
+ <class-decl name='ddt_key' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-68'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ddk_cksum' type-id='type-id-89' visibility='default'/>
+ <var-decl name='ddk_cksum' type-id='type-id-56' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='ddk_prop' type-id='type-id-23' visibility='default'/>
+ <var-decl name='ddk_prop' type-id='type-id-8' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='ddt_key_t' type-id='type-id-101' id='type-id-97'/>
+ <typedef-decl name='ddt_key_t' type-id='type-id-68' id='type-id-64'/>
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='64' id='type-id-98'>
- <subrange length='8' type-id='type-id-12' id='type-id-102'/>
+ <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='64' id='type-id-65'>
+ <subrange length='8' type-id='type-id-12' id='type-id-69'/>
</array-type-def>
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='96' id='type-id-99'>
- <subrange length='12' type-id='type-id-12' id='type-id-103'/>
+ <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='96' id='type-id-66'>
+ <subrange length='12' type-id='type-id-12' id='type-id-70'/>
</array-type-def>
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='128' id='type-id-100'>
- <subrange length='16' type-id='type-id-12' id='type-id-104'/>
+ <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='128' id='type-id-67'>
+ <subrange length='16' type-id='type-id-12' id='type-id-71'/>
</array-type-def>
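The three array types just declared (8, 12 and 16 elements of the 8-bit type-id-33) are the fixed-size cryptographic fields carried by raw send-stream records: a salt, an IV and a MAC. Assuming type-id-33 maps to uint8_t, a minimal C sketch of how those members sit inside the drr_write record above is:

#include <stdint.h>

/* Illustrative sketch only; the bit offsets are taken from the
 * layout-offset-in-bits attributes of drr_write above. */
struct drr_write_crypt_sketch {
	uint8_t drr_salt[8];	/* bit offset 768 */
	uint8_t drr_iv[12];	/* bit offset 832 */
	uint8_t drr_mac[16];	/* bit offset 928 */
};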
- <class-decl name='drr_free' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-82'>
+ <class-decl name='drr_free' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-45'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_offset' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_length' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='drr_write_byref' size-in-bits='832' is-struct='yes' visibility='default' id='type-id-83'>
+ <class-decl name='drr_write_byref' size-in-bits='832' is-struct='yes' visibility='default' id='type-id-46'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_offset' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_length' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_refguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_refguid' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_refobject' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_refobject' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_refoffset' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_refoffset' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='drr_checksumtype' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_checksumtype' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='456'>
- <var-decl name='drr_flags' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_flags' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='464'>
- <var-decl name='drr_pad2' type-id='type-id-105' visibility='default'/>
+ <var-decl name='drr_pad2' type-id='type-id-72' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='drr_key' type-id='type-id-97' visibility='default'/>
+ <var-decl name='drr_key' type-id='type-id-64' visibility='default'/>
</data-member>
</class-decl>
- <array-type-def dimensions='1' type-id='type-id-32' size-in-bits='48' id='type-id-105'>
- <subrange length='6' type-id='type-id-12' id='type-id-106'/>
+ <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='48' id='type-id-72'>
+ <subrange length='6' type-id='type-id-12' id='type-id-73'/>
</array-type-def>
- <class-decl name='drr_spill' size-in-bits='640' is-struct='yes' visibility='default' id='type-id-84'>
+ <class-decl name='drr_spill' size-in-bits='640' is-struct='yes' visibility='default' id='type-id-47'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_length' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_length' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_flags' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_flags' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='200'>
- <var-decl name='drr_compressiontype' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_compressiontype' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='208'>
- <var-decl name='drr_pad' type-id='type-id-105' visibility='default'/>
+ <var-decl name='drr_pad' type-id='type-id-72' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_compressed_size' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_compressed_size' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_salt' type-id='type-id-98' visibility='default'/>
+ <var-decl name='drr_salt' type-id='type-id-65' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_iv' type-id='type-id-99' visibility='default'/>
+ <var-decl name='drr_iv' type-id='type-id-66' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='drr_mac' type-id='type-id-100' visibility='default'/>
+ <var-decl name='drr_mac' type-id='type-id-67' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='608'>
- <var-decl name='drr_type' type-id='type-id-93' visibility='default'/>
+ <var-decl name='drr_type' type-id='type-id-60' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='drr_write_embedded' size-in-bits='384' is-struct='yes' visibility='default' id='type-id-85'>
+ <class-decl name='drr_write_embedded' size-in-bits='384' is-struct='yes' visibility='default' id='type-id-48'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_offset' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_length' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_compression' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_compression' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='264'>
- <var-decl name='drr_etype' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_etype' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='272'>
- <var-decl name='drr_pad' type-id='type-id-105' visibility='default'/>
+ <var-decl name='drr_pad' type-id='type-id-72' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_lsize' type-id='type-id-22' visibility='default'/>
+ <var-decl name='drr_lsize' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='drr_psize' type-id='type-id-22' visibility='default'/>
+ <var-decl name='drr_psize' type-id='type-id-7' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='drr_object_range' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-86'>
+ <class-decl name='drr_object_range' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-49'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_firstobj' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_firstobj' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_numslots' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_numslots' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_salt' type-id='type-id-98' visibility='default'/>
+ <var-decl name='drr_salt' type-id='type-id-65' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_iv' type-id='type-id-99' visibility='default'/>
+ <var-decl name='drr_iv' type-id='type-id-66' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='drr_mac' type-id='type-id-100' visibility='default'/>
+ <var-decl name='drr_mac' type-id='type-id-67' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='drr_flags' type-id='type-id-32' visibility='default'/>
+ <var-decl name='drr_flags' type-id='type-id-33' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='488'>
- <var-decl name='drr_pad' type-id='type-id-48' visibility='default'/>
+ <var-decl name='drr_pad' type-id='type-id-74' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='drr_redact' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-87'>
+
+ <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='24' id='type-id-74'>
+ <subrange length='3' type-id='type-id-12' id='type-id-75'/>
+
+ </array-type-def>
+ <class-decl name='drr_redact' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-50'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_offset' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_length' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='drr_checksum' size-in-bits='2432' is-struct='yes' visibility='default' id='type-id-88'>
+ <class-decl name='drr_checksum' size-in-bits='2432' is-struct='yes' visibility='default' id='type-id-51'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_pad' type-id='type-id-107' visibility='default'/>
+ <var-decl name='drr_pad' type-id='type-id-76' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
- <var-decl name='drr_checksum' type-id='type-id-89' visibility='default'/>
+ <var-decl name='drr_checksum' type-id='type-id-56' visibility='default'/>
</data-member>
</class-decl>
- <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='2176' id='type-id-107'>
- <subrange length='34' type-id='type-id-12' id='type-id-108'/>
+ <array-type-def dimensions='1' type-id='type-id-8' size-in-bits='2176' id='type-id-76'>
+ <subrange length='34' type-id='type-id-12' id='type-id-77'/>
</array-type-def>
- <typedef-decl name='dmu_replay_record_t' type-id='type-id-75' id='type-id-109'/>
- <qualified-type-def type-id='type-id-109' const='yes' id='type-id-110'/>
- <pointer-type-def type-id='type-id-110' size-in-bits='64' id='type-id-111'/>
- <function-decl name='lzc_receive_with_header' mangled-name='lzc_receive_with_header' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_with_header'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-29' name='props'/>
- <parameter type-id='type-id-16' name='origin'/>
- <parameter type-id='type-id-41' name='force'/>
- <parameter type-id='type-id-41' name='resumable'/>
- <parameter type-id='type-id-41' name='raw'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-111' name='begin_record'/>
+ <typedef-decl name='dmu_replay_record_t' type-id='type-id-37' id='type-id-78'/>
+ <qualified-type-def type-id='type-id-78' const='yes' id='type-id-79'/>
+ <pointer-type-def type-id='type-id-79' size-in-bits='64' id='type-id-80'/>
+ <pointer-type-def type-id='type-id-8' size-in-bits='64' id='type-id-81'/>
+ <function-decl name='lzc_receive_with_cmdprops' mangled-name='lzc_receive_with_cmdprops' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_with_cmdprops'>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-15' name='props'/>
+ <parameter type-id='type-id-15' name='cmdprops'/>
+ <parameter type-id='type-id-34' name='wkeydata'/>
+ <parameter type-id='type-id-35' name='wkeylen'/>
+ <parameter type-id='type-id-4' name='origin'/>
+ <parameter type-id='type-id-23' name='force'/>
+ <parameter type-id='type-id-23' name='resumable'/>
+ <parameter type-id='type-id-23' name='raw'/>
+ <parameter type-id='type-id-1' name='input_fd'/>
+ <parameter type-id='type-id-80' name='begin_record'/>
+ <parameter type-id='type-id-1' name='cleanup_fd'/>
+ <parameter type-id='type-id-81' name='read_bytes'/>
+ <parameter type-id='type-id-81' name='errflags'/>
+ <parameter type-id='type-id-81' name='action_handle'/>
+ <parameter type-id='type-id-16' name='errors'/>
<return type-id='type-id-1'/>
</function-decl>
<function-decl name='lzc_receive_one' mangled-name='lzc_receive_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_one'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-29' name='props'/>
- <parameter type-id='type-id-16' name='origin'/>
- <parameter type-id='type-id-41' name='force'/>
- <parameter type-id='type-id-41' name='resumable'/>
- <parameter type-id='type-id-41' name='raw'/>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-15' name='props'/>
+ <parameter type-id='type-id-4' name='origin'/>
+ <parameter type-id='type-id-23' name='force'/>
+ <parameter type-id='type-id-23' name='resumable'/>
+ <parameter type-id='type-id-23' name='raw'/>
<parameter type-id='type-id-1' name='input_fd'/>
- <parameter type-id='type-id-111' name='begin_record'/>
+ <parameter type-id='type-id-80' name='begin_record'/>
<parameter type-id='type-id-1' name='cleanup_fd'/>
- <parameter type-id='type-id-71' name='read_bytes'/>
- <parameter type-id='type-id-71' name='errflags'/>
- <parameter type-id='type-id-71' name='action_handle'/>
- <parameter type-id='type-id-64' name='errors'/>
+ <parameter type-id='type-id-81' name='read_bytes'/>
+ <parameter type-id='type-id-81' name='errflags'/>
+ <parameter type-id='type-id-81' name='action_handle'/>
+ <parameter type-id='type-id-16' name='errors'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_receive_with_header' mangled-name='lzc_receive_with_header' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_with_header'>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-15' name='props'/>
+ <parameter type-id='type-id-4' name='origin'/>
+ <parameter type-id='type-id-23' name='force'/>
+ <parameter type-id='type-id-23' name='resumable'/>
+ <parameter type-id='type-id-23' name='raw'/>
+ <parameter type-id='type-id-1' name='fd'/>
+ <parameter type-id='type-id-80' name='begin_record'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_receive_resumable' mangled-name='lzc_receive_resumable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_resumable'>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-15' name='props'/>
+ <parameter type-id='type-id-4' name='origin'/>
+ <parameter type-id='type-id-23' name='force'/>
+ <parameter type-id='type-id-23' name='raw'/>
+ <parameter type-id='type-id-1' name='fd'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzc_receive' mangled-name='lzc_receive' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive'>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-15' name='props'/>
+ <parameter type-id='type-id-4' name='origin'/>
+ <parameter type-id='type-id-23' name='force'/>
+ <parameter type-id='type-id-23' name='raw'/>
+ <parameter type-id='type-id-1' name='fd'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
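The block above declares the whole lzc_receive_*() family, from the minimal lzc_receive() up to lzc_receive_with_cmdprops() with its wrapping-key and statistics out-parameters. A minimal sketch of the simplest variant, assuming the usual libzfs_core.h prototypes and an already-open stream descriptor (receive_example and pool/dst@snap are placeholder names):

#include <libzfs_core.h>

/* Hypothetical example: receive a non-forced, non-raw stream from fd
 * into pool/dst@snap with no extra properties and no clone origin. */
static int
receive_example(int fd)
{
	int err;

	if ((err = libzfs_core_init()) != 0)
		return (err);
	err = lzc_receive("pool/dst@snap", NULL, NULL, B_FALSE, B_FALSE, fd);
	libzfs_core_fini();
	return (err);
}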
+ <enum-decl name='lzc_send_flags' id='type-id-82'>
+ <underlying-type type-id='type-id-19'/>
+ <enumerator name='LZC_SEND_FLAG_EMBED_DATA' value='1'/>
+ <enumerator name='LZC_SEND_FLAG_LARGE_BLOCK' value='2'/>
+ <enumerator name='LZC_SEND_FLAG_COMPRESS' value='4'/>
+ <enumerator name='LZC_SEND_FLAG_RAW' value='8'/>
+ <enumerator name='LZC_SEND_FLAG_SAVED' value='16'/>
+ </enum-decl>
+ <function-decl name='lzc_send_space' mangled-name='lzc_send_space' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_space'>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-4' name='from'/>
+ <parameter type-id='type-id-82' name='flags'/>
+ <parameter type-id='type-id-81' name='spacep'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_receive_with_cmdprops' mangled-name='lzc_receive_with_cmdprops' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_with_cmdprops'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-29' name='props'/>
- <parameter type-id='type-id-29' name='cmdprops'/>
- <parameter type-id='type-id-33' name='wkeydata'/>
- <parameter type-id='type-id-34' name='wkeylen'/>
- <parameter type-id='type-id-16' name='origin'/>
- <parameter type-id='type-id-41' name='force'/>
- <parameter type-id='type-id-41' name='resumable'/>
- <parameter type-id='type-id-41' name='raw'/>
- <parameter type-id='type-id-1' name='input_fd'/>
- <parameter type-id='type-id-111' name='begin_record'/>
- <parameter type-id='type-id-1' name='cleanup_fd'/>
- <parameter type-id='type-id-71' name='read_bytes'/>
- <parameter type-id='type-id-71' name='errflags'/>
- <parameter type-id='type-id-71' name='action_handle'/>
- <parameter type-id='type-id-64' name='errors'/>
+ <function-decl name='lzc_send_space_resume_redacted' mangled-name='lzc_send_space_resume_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_space_resume_redacted'>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-4' name='from'/>
+ <parameter type-id='type-id-82' name='flags'/>
+ <parameter type-id='type-id-8' name='resumeobj'/>
+ <parameter type-id='type-id-8' name='resumeoff'/>
+ <parameter type-id='type-id-8' name='resume_bytes'/>
+ <parameter type-id='type-id-4' name='redactbook'/>
+ <parameter type-id='type-id-1' name='fd'/>
+ <parameter type-id='type-id-81' name='spacep'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_rollback' mangled-name='lzc_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rollback'>
- <parameter type-id='type-id-16' name='fsname'/>
- <parameter type-id='type-id-37' name='snapnamebuf'/>
- <parameter type-id='type-id-1' name='snapnamelen'/>
+ <function-decl name='lzc_send_resume_redacted' mangled-name='lzc_send_resume_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_resume_redacted'>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-4' name='from'/>
+ <parameter type-id='type-id-1' name='fd'/>
+ <parameter type-id='type-id-82' name='flags'/>
+ <parameter type-id='type-id-8' name='resumeobj'/>
+ <parameter type-id='type-id-8' name='resumeoff'/>
+ <parameter type-id='type-id-4' name='redactbook'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-37'/>
+ <function-decl name='lzc_send_resume' mangled-name='lzc_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_resume'>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-4' name='from'/>
+ <parameter type-id='type-id-1' name='fd'/>
+ <parameter type-id='type-id-82' name='flags'/>
+ <parameter type-id='type-id-8' name='resumeobj'/>
+ <parameter type-id='type-id-8' name='resumeoff'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_rollback_to' mangled-name='lzc_rollback_to' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rollback_to'>
- <parameter type-id='type-id-16' name='fsname'/>
- <parameter type-id='type-id-16' name='snapname'/>
+ <function-decl name='lzc_send_redacted' mangled-name='lzc_send_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_redacted'>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-4' name='from'/>
+ <parameter type-id='type-id-1' name='fd'/>
+ <parameter type-id='type-id-82' name='flags'/>
+ <parameter type-id='type-id-4' name='redactbook'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_bookmark' mangled-name='lzc_bookmark' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_bookmark'>
- <parameter type-id='type-id-29' name='bookmarks'/>
- <parameter type-id='type-id-64' name='errlist'/>
+ <function-decl name='lzc_send' mangled-name='lzc_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send'>
+ <parameter type-id='type-id-4' name='snapname'/>
+ <parameter type-id='type-id-4' name='from'/>
+ <parameter type-id='type-id-1' name='fd'/>
+ <parameter type-id='type-id-82' name='flags'/>
<return type-id='type-id-1'/>
</function-decl>
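The lzc_send_flags enumerators above are bit-mask values meant to be OR-ed together and handed to the lzc_send*() entry points declared here. A minimal sketch of a full (non-incremental) send, assuming the standard libzfs_core.h prototypes (send_example and pool/fs@snap are placeholders):

#include <libzfs_core.h>

/* Hypothetical example: stream pool/fs@snap to out_fd, preserving large
 * blocks and passing through already-compressed data. */
static int
send_example(int out_fd)
{
	enum lzc_send_flags flags =
	    LZC_SEND_FLAG_LARGE_BLOCK | LZC_SEND_FLAG_COMPRESS;

	/* A NULL 'from' argument requests a full, non-incremental stream. */
	return (lzc_send("pool/fs@snap", NULL, out_fd, flags));
}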
- <function-decl name='lzc_get_bookmarks' mangled-name='lzc_get_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bookmarks'>
- <parameter type-id='type-id-16' name='pool_name'/>
- <parameter type-id='type-id-29' name='innvl'/>
- <parameter type-id='type-id-64' name='outnvl'/>
+ <function-decl name='lzc_get_holds' mangled-name='lzc_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_holds'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-16' name='outnvl'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_get_bookmark_props' mangled-name='lzc_get_bookmark_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bookmark_props'>
- <parameter type-id='type-id-16' name='bookmark'/>
- <parameter type-id='type-id-64' name='props'/>
+ <function-decl name='lzc_release' mangled-name='lzc_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_release'>
+ <parameter type-id='type-id-15' name='holds'/>
+ <parameter type-id='type-id-16' name='errlist'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_destroy_bookmarks' mangled-name='lzc_destroy_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy_bookmarks'>
- <parameter type-id='type-id-29' name='bookmarks'/>
- <parameter type-id='type-id-64' name='errlist'/>
+ <function-decl name='lzc_hold' mangled-name='lzc_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_hold'>
+ <parameter type-id='type-id-15' name='holds'/>
+ <parameter type-id='type-id-1' name='cleanup_fd'/>
+ <parameter type-id='type-id-16' name='errlist'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_channel_program' mangled-name='lzc_channel_program' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_channel_program'>
- <parameter type-id='type-id-16' name='pool'/>
- <parameter type-id='type-id-16' name='program'/>
- <parameter type-id='type-id-23' name='instrlimit'/>
- <parameter type-id='type-id-23' name='memlimit'/>
- <parameter type-id='type-id-29' name='argnvl'/>
- <parameter type-id='type-id-64' name='outnvl'/>
+ <function-decl name='lzc_sync' mangled-name='lzc_sync' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_sync'>
+ <parameter type-id='type-id-4' name='fsname'/>
+ <parameter type-id='type-id-15' name='props'/>
+ <parameter type-id='type-id-16' name='bmarks'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-51'/>
- <return type-id='type-id-17'/>
+ <function-decl name='lzc_exists' mangled-name='lzc_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_exists'>
+ <parameter type-id='type-id-4' name='dataset'/>
+ <return type-id='type-id-23'/>
</function-decl>
- <function-decl name='lzc_pool_checkpoint' mangled-name='lzc_pool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_pool_checkpoint'>
- <parameter type-id='type-id-16' name='pool'/>
+ <function-decl name='lzc_snaprange_space' mangled-name='lzc_snaprange_space' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_snaprange_space'>
+ <parameter type-id='type-id-4' name='firstsnap'/>
+ <parameter type-id='type-id-4' name='lastsnap'/>
+ <parameter type-id='type-id-81' name='usedp'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_pool_checkpoint_discard' mangled-name='lzc_pool_checkpoint_discard' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_pool_checkpoint_discard'>
- <parameter type-id='type-id-16' name='pool'/>
+ <function-decl name='lzc_destroy_snaps' mangled-name='lzc_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy_snaps'>
+ <parameter type-id='type-id-15' name='snaps'/>
+ <parameter type-id='type-id-23' name='defer'/>
+ <parameter type-id='type-id-16' name='errlist'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_channel_program_nosync' mangled-name='lzc_channel_program_nosync' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_channel_program_nosync'>
- <parameter type-id='type-id-16' name='pool'/>
- <parameter type-id='type-id-16' name='program'/>
- <parameter type-id='type-id-23' name='instrlimit'/>
- <parameter type-id='type-id-23' name='memlimit'/>
- <parameter type-id='type-id-29' name='argnvl'/>
- <parameter type-id='type-id-64' name='outnvl'/>
+ <function-decl name='lzc_snapshot' mangled-name='lzc_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_snapshot'>
+ <parameter type-id='type-id-15' name='snaps'/>
+ <parameter type-id='type-id-15' name='props'/>
+ <parameter type-id='type-id-16' name='errlist'/>
<return type-id='type-id-1'/>
</function-decl>
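lzc_snapshot() takes the snapshots to create as an nvlist whose names are full snapshot names within a single pool, which is why the fnvlist_*() helpers also appear later in this ABI section. A minimal sketch, assuming libnvpair's fnvlist convenience API (the dataset names are placeholders):

#include <libzfs_core.h>
#include <libnvpair.h>

/* Hypothetical example: atomically create two snapshots in one pool. */
static int
snapshot_example(void)
{
	nvlist_t *snaps = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int err;

	fnvlist_add_boolean(snaps, "pool/fs@backup");
	fnvlist_add_boolean(snaps, "pool/fs/home@backup");
	err = lzc_snapshot(snaps, NULL, &errlist);
	fnvlist_free(snaps);
	nvlist_free(errlist);	/* nvlist_free(NULL) is a no-op */
	return (err);
}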
- <function-decl name='lzc_load_key' mangled-name='lzc_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_load_key'>
- <parameter type-id='type-id-16' name='fsname'/>
- <parameter type-id='type-id-41' name='noop'/>
- <parameter type-id='type-id-33' name='wkeydata'/>
- <parameter type-id='type-id-34' name='wkeylen'/>
+ <function-decl name='lzc_destroy' mangled-name='lzc_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy'>
+ <parameter type-id='type-id-4' name='fsname'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_unload_key' mangled-name='lzc_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_unload_key'>
- <parameter type-id='type-id-16' name='fsname'/>
+ <function-decl name='lzc_rename' mangled-name='lzc_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rename'>
+ <parameter type-id='type-id-4' name='source'/>
+ <parameter type-id='type-id-4' name='target'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_change_key' mangled-name='lzc_change_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_change_key'>
- <parameter type-id='type-id-16' name='fsname'/>
- <parameter type-id='type-id-23' name='crypt_cmd'/>
- <parameter type-id='type-id-29' name='props'/>
- <parameter type-id='type-id-33' name='wkeydata'/>
- <parameter type-id='type-id-34' name='wkeylen'/>
+ <function-decl name='lzc_promote' mangled-name='lzc_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_promote'>
+ <parameter type-id='type-id-4' name='fsname'/>
+ <parameter type-id='type-id-36' name='snapnamebuf'/>
+ <parameter type-id='type-id-1' name='snapnamelen'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='lzc_reopen' mangled-name='lzc_reopen' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_reopen'>
- <parameter type-id='type-id-16' name='pool_name'/>
- <parameter type-id='type-id-41' name='scrub_restart'/>
+ <function-decl name='lzc_clone' mangled-name='lzc_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_clone'>
+ <parameter type-id='type-id-4' name='fsname'/>
+ <parameter type-id='type-id-4' name='origin'/>
+ <parameter type-id='type-id-15' name='props'/>
<return type-id='type-id-1'/>
</function-decl>
- <enum-decl name='pool_initialize_func' id='type-id-112'>
- <underlying-type type-id='type-id-18'/>
- <enumerator name='POOL_INITIALIZE_START' value='0'/>
- <enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
- <enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
- <enumerator name='POOL_INITIALIZE_FUNCS' value='3'/>
+ <enum-decl name='lzc_dataset_type' id='type-id-83'>
+ <underlying-type type-id='type-id-19'/>
+ <enumerator name='LZC_DATSET_TYPE_ZFS' value='2'/>
+ <enumerator name='LZC_DATSET_TYPE_ZVOL' value='3'/>
</enum-decl>
- <typedef-decl name='pool_initialize_func_t' type-id='type-id-112' id='type-id-113'/>
- <function-decl name='lzc_initialize' mangled-name='lzc_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_initialize'>
- <parameter type-id='type-id-16' name='poolname'/>
- <parameter type-id='type-id-113' name='cmd_type'/>
- <parameter type-id='type-id-29' name='vdevs'/>
- <parameter type-id='type-id-64' name='errlist'/>
+ <function-decl name='lzc_create' mangled-name='lzc_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_create'>
+ <parameter type-id='type-id-4' name='fsname'/>
+ <parameter type-id='type-id-83' name='type'/>
+ <parameter type-id='type-id-15' name='props'/>
+ <parameter type-id='type-id-34' name='wkeydata'/>
+ <parameter type-id='type-id-35' name='wkeylen'/>
<return type-id='type-id-1'/>
</function-decl>
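lzc_dataset_type distinguishes filesystems from volumes (the 2/3 values mirror the underlying DMU object-set types), and lzc_create() additionally accepts optional wrapping-key material for encrypted datasets. A small sketch of the common unencrypted case; the LZC_DATSET_TYPE_ZFS spelling is copied verbatim from the enumerator above, and pool/newfs is a placeholder:

#include <libzfs_core.h>

/* Hypothetical example: create an unencrypted filesystem with default
 * properties (NULL props) and no wrapping key. */
static int
create_example(void)
{
	return (lzc_create("pool/newfs", LZC_DATSET_TYPE_ZFS, NULL, NULL, 0));
}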
- <enum-decl name='pool_trim_func' id='type-id-114'>
- <underlying-type type-id='type-id-18'/>
- <enumerator name='POOL_TRIM_START' value='0'/>
- <enumerator name='POOL_TRIM_CANCEL' value='1'/>
- <enumerator name='POOL_TRIM_SUSPEND' value='2'/>
- <enumerator name='POOL_TRIM_FUNCS' value='3'/>
- </enum-decl>
- <typedef-decl name='pool_trim_func_t' type-id='type-id-114' id='type-id-115'/>
- <function-decl name='lzc_trim' mangled-name='lzc_trim' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_trim'>
- <parameter type-id='type-id-16' name='poolname'/>
- <parameter type-id='type-id-115' name='cmd_type'/>
- <parameter type-id='type-id-23' name='rate'/>
- <parameter type-id='type-id-41' name='secure'/>
- <parameter type-id='type-id-29' name='vdevs'/>
- <parameter type-id='type-id-64' name='errlist'/>
- <return type-id='type-id-1'/>
+ <type-decl name='void' id='type-id-84'/>
+ <function-decl name='libzfs_core_fini' mangled-name='libzfs_core_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_core_fini'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='lzc_redact' mangled-name='lzc_redact' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_redact'>
- <parameter type-id='type-id-16' name='snapshot'/>
- <parameter type-id='type-id-16' name='bookname'/>
- <parameter type-id='type-id-29' name='snapnv'/>
+ <function-decl name='libzfs_core_init' mangled-name='libzfs_core_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_core_init'>
<return type-id='type-id-1'/>
</function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-116'>
- <underlying-type type-id='type-id-18'/>
- <enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
- <enumerator name='ZPOOL_WAIT_FREE' value='1'/>
- <enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
- <enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
- <enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
- <enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
- <enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
- <enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
- <enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
- </enum-decl>
- <typedef-decl name='zpool_wait_activity_t' type-id='type-id-116' id='type-id-117'/>
- <pointer-type-def type-id='type-id-41' size-in-bits='64' id='type-id-118'/>
- <function-decl name='lzc_wait' mangled-name='lzc_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait'>
- <parameter type-id='type-id-16' name='pool'/>
- <parameter type-id='type-id-117' name='activity'/>
- <parameter type-id='type-id-118' name='waited'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_alloc' mangled-name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-51'/>
+ <function-decl name='fnvlist_add_int32' mangled-name='fnvlist_add_int32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='lzc_wait_tag' mangled-name='lzc_wait_tag' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait_tag'>
- <parameter type-id='type-id-16' name='pool'/>
- <parameter type-id='type-id-117' name='activity'/>
- <parameter type-id='type-id-23' name='tag'/>
- <parameter type-id='type-id-118' name='waited'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_lookup_boolean_value' mangled-name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-119'>
- <underlying-type type-id='type-id-18'/>
- <enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
- <enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
- </enum-decl>
- <typedef-decl name='zfs_wait_activity_t' type-id='type-id-119' id='type-id-120'/>
- <function-decl name='lzc_wait_fs' mangled-name='lzc_wait_fs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait_fs'>
- <parameter type-id='type-id-16' name='fs'/>
- <parameter type-id='type-id-120' name='activity'/>
- <parameter type-id='type-id-118' name='waited'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_free' mangled-name='fnvlist_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <qualified-type-def type-id='type-id-28' const='yes' id='type-id-121'/>
- <pointer-type-def type-id='type-id-121' size-in-bits='64' id='type-id-122'/>
- <function-decl name='lzc_set_bootenv' mangled-name='lzc_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_set_bootenv'>
- <parameter type-id='type-id-16' name='pool'/>
- <parameter type-id='type-id-122' name='env'/>
- <return type-id='type-id-1'/>
+ <function-decl name='__stack_chk_fail' mangled-name='__stack_chk_fail' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='lzc_get_bootenv' mangled-name='lzc_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bootenv'>
- <parameter type-id='type-id-16' name='snapname'/>
- <parameter type-id='type-id-64' name='holdsp'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_uint64' mangled-name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='zutil_device_path.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzutil' language='LANG_C99'>
- <function-decl name='zfs_basename' mangled-name='zfs_basename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_basename'>
- <parameter type-id='type-id-16' name='path'/>
- <return type-id='type-id-16'/>
+ <function-decl name='fnvlist_add_string' mangled-name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <typedef-decl name='__ssize_t' type-id='type-id-5' id='type-id-123'/>
- <typedef-decl name='ssize_t' type-id='type-id-123' id='type-id-124'/>
- <function-decl name='zfs_dirnamelen' mangled-name='zfs_dirnamelen' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dirnamelen'>
- <parameter type-id='type-id-16' name='path'/>
- <return type-id='type-id-124'/>
+ <function-decl name='fnvlist_add_nvlist' mangled-name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <typedef-decl name='size_t' type-id='type-id-26' id='type-id-125'/>
- <function-decl name='zfs_resolve_shortname' mangled-name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_resolve_shortname'>
- <parameter type-id='type-id-16' name='name'/>
- <parameter type-id='type-id-37' name='path'/>
- <parameter type-id='type-id-125' name='len'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_boolean_value' mangled-name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='getenv' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-37'/>
+ <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <pointer-type-def type-id='type-id-37' size-in-bits='64' id='type-id-126'/>
- <function-decl name='strtok_r' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-126'/>
- <return type-id='type-id-37'/>
+ <function-decl name='fnvlist_add_uint8_array' mangled-name='fnvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='access' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fnvlist_add_boolean' mangled-name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <qualified-type-def type-id='type-id-16' const='yes' id='type-id-127'/>
- <pointer-type-def type-id='type-id-127' size-in-bits='64' id='type-id-128'/>
- <function-decl name='zpool_default_search_paths' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-38'/>
- <return type-id='type-id-128'/>
+ <function-decl name='nvlist_next_nvpair' mangled-name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_strcmp_pathname' mangled-name='zfs_strcmp_pathname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strcmp_pathname'>
- <parameter type-id='type-id-16' name='name'/>
- <parameter type-id='type-id-16' name='cmp'/>
- <parameter type-id='type-id-1' name='wholedisk'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvpair_name' mangled-name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='strlcat' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-26'/>
+ <function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-1'/>
+ <function-decl name='strcspn' mangled-name='strcspn' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_string' mangled-name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strrchr' mangled-name='strrchr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='fnvlist_dup' mangled-name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_byte_array' mangled-name='fnvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_unpack' mangled-name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__builtin_memset' mangled-name='memset' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='fnvlist_pack' mangled-name='fnvlist_pack' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='malloc' mangled-name='malloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zfs_ioctl_fd' mangled-name='zfs_ioctl_fd' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__errno_location' mangled-name='__errno_location' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='fnvlist_pack_free' mangled-name='fnvlist_pack_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='free' mangled-name='free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__read_alias' mangled-name='read' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strchr' mangled-name='strchr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_uint64' mangled-name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='fnvlist_unpack' mangled-name='fnvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='pthread_mutex_lock' mangled-name='pthread_mutex_lock' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='close' mangled-name='close' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='pthread_mutex_unlock' mangled-name='pthread_mutex_unlock' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__open_alias' mangled-name='open64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='zutil_import.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzutil' language='LANG_C99'>
- <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-129'/>
- <function-decl name='zpool_read_label' mangled-name='zpool_read_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_read_label'>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-64' name='config'/>
- <parameter type-id='type-id-129' name='num_labels'/>
+ <abi-instr version='1.0' address-size='64' path='zutil_device_path.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
+ <function-decl name='zfs_strcmp_pathname' mangled-name='zfs_strcmp_pathname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strcmp_pathname'>
+ <parameter type-id='type-id-4' name='name'/>
+ <parameter type-id='type-id-4' name='cmp'/>
+ <parameter type-id='type-id-1' name='wholedisk'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='ioctl' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-26'/>
- <parameter is-variadic='yes'/>
+ <typedef-decl name='size_t' type-id='type-id-12' id='type-id-85'/>
+ <function-decl name='zfs_resolve_shortname' mangled-name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_resolve_shortname'>
+ <parameter type-id='type-id-4' name='name'/>
+ <parameter type-id='type-id-36' name='path'/>
+ <parameter type-id='type-id-85' name='len'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='spl_pagesize' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-26'/>
+ <type-decl name='long int' size-in-bits='64' id='type-id-86'/>
+ <typedef-decl name='__ssize_t' type-id='type-id-86' id='type-id-87'/>
+ <typedef-decl name='ssize_t' type-id='type-id-87' id='type-id-88'/>
+ <function-decl name='zfs_dirnamelen' mangled-name='zfs_dirnamelen' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dirnamelen'>
+ <parameter type-id='type-id-4' name='path'/>
+ <return type-id='type-id-88'/>
</function-decl>
- <pointer-type-def type-id='type-id-73' size-in-bits='64' id='type-id-130'/>
- <function-decl name='posix_memalign' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-130'/>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_basename' mangled-name='zfs_basename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_basename'>
+ <parameter type-id='type-id-4' name='path'/>
+ <return type-id='type-id-4'/>
+ </function-decl>
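zfs_basename() and zfs_dirnamelen() are the libzutil path helpers exported here: the first returns the component after the last '/', the second the length of everything before it (or -1 when the path contains no '/'). A minimal usage sketch, assuming those semantics and the prototypes shown above:

#include <stdio.h>
#include <sys/types.h>
#include <libzutil.h>

/* Hypothetical example: split a device path into directory and base name. */
static void
split_example(void)
{
	const char *path = "/dev/disk/by-id/ata-DISK-part1";
	ssize_t dlen = zfs_dirnamelen(path);

	printf("dir=%.*s base=%s\n",
	    dlen < 0 ? 0 : (int)dlen, path, zfs_basename(path));
}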
+ <function-decl name='__builtin___snprintf_chk' mangled-name='__snprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <class-decl name='aiocb' size-in-bits='1344' is-struct='yes' visibility='default' id='type-id-131'>
+ <function-decl name='getenv' mangled-name='getenv' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strdup' mangled-name='strdup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strtok_r' mangled-name='strtok_r' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strlen' mangled-name='strlen' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strcmp' mangled-name='strcmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zfs_append_partition' mangled-name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zpool_default_search_paths' mangled-name='zpool_default_search_paths' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='access' mangled-name='access' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='zutil_import.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
+ <pointer-type-def type-id='type-id-84' size-in-bits='64' id='type-id-89'/>
+ <class-decl name='importargs' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-90'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='aio_fildes' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='aio_lio_opcode' type-id='type-id-1' visibility='default'/>
+ <var-decl name='path' type-id='type-id-91' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='aio_reqprio' type-id='type-id-1' visibility='default'/>
+ <var-decl name='paths' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='aio_buf' type-id='type-id-132' visibility='default'/>
+ <var-decl name='poolname' type-id='type-id-4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='aio_nbytes' type-id='type-id-125' visibility='default'/>
+ <var-decl name='guid' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='aio_sigevent' type-id='type-id-133' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='__next_prio' type-id='type-id-134' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='__abs_prio' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='864'>
- <var-decl name='__policy' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='__error_code' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='__return_value' type-id='type-id-123' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='aio_offset' type-id='type-id-135' visibility='default'/>
+ <var-decl name='cachefile' type-id='type-id-4' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1088'>
- <var-decl name='__glibc_reserved' type-id='type-id-136' visibility='default'/>
- </data-member>
- </class-decl>
- <qualified-type-def type-id='type-id-17' volatile='yes' id='type-id-137'/>
- <pointer-type-def type-id='type-id-137' size-in-bits='64' id='type-id-132'/>
- <class-decl name='sigevent' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-133'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='sigev_value' type-id='type-id-138' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='sigev_signo' type-id='type-id-1' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='can_be_active' type-id='type-id-23' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='sigev_notify' type-id='type-id-1' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='scan' type-id='type-id-23' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_sigev_un' type-id='type-id-139' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='policy' type-id='type-id-15' visibility='default'/>
</data-member>
</class-decl>
- <union-decl name='sigval' size-in-bits='64' visibility='default' id='type-id-140'>
- <data-member access='private'>
- <var-decl name='sival_int' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='sival_ptr' type-id='type-id-73' visibility='default'/>
- </data-member>
- </union-decl>
- <typedef-decl name='__sigval_t' type-id='type-id-140' id='type-id-138'/>
- <union-decl name='__anonymous_union__' size-in-bits='384' is-anonymous='yes' visibility='default' id='type-id-139'>
- <data-member access='private'>
- <var-decl name='_pad' type-id='type-id-141' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='_tid' type-id='type-id-142' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='_sigev_thread' type-id='type-id-143' visibility='default'/>
- </data-member>
- </union-decl>
-
- <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='384' id='type-id-141'>
- <subrange length='12' type-id='type-id-12' id='type-id-103'/>
-
- </array-type-def>
- <typedef-decl name='__pid_t' type-id='type-id-1' id='type-id-142'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-143'>
+ <pointer-type-def type-id='type-id-36' size-in-bits='64' id='type-id-91'/>
+ <typedef-decl name='importargs_t' type-id='type-id-90' id='type-id-92'/>
+ <pointer-type-def type-id='type-id-92' size-in-bits='64' id='type-id-93'/>
+ <class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-94'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_function' type-id='type-id-144' visibility='default'/>
+ <var-decl name='pco_refresh_config' type-id='type-id-95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_attribute' type-id='type-id-145' visibility='default'/>
+ <var-decl name='pco_pool_active' type-id='type-id-96' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-146' size-in-bits='64' id='type-id-144'/>
- <union-decl name='pthread_attr_t' size-in-bits='448' visibility='default' id='type-id-147'>
- <data-member access='private'>
- <var-decl name='__size' type-id='type-id-148' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__align' type-id='type-id-5' visibility='default'/>
- </data-member>
- </union-decl>
-
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='448' id='type-id-148'>
- <subrange length='56' type-id='type-id-12' id='type-id-149'/>
-
- </array-type-def>
- <typedef-decl name='pthread_attr_t' type-id='type-id-147' id='type-id-150'/>
- <pointer-type-def type-id='type-id-150' size-in-bits='64' id='type-id-145'/>
- <pointer-type-def type-id='type-id-131' size-in-bits='64' id='type-id-134'/>
- <typedef-decl name='__off64_t' type-id='type-id-5' id='type-id-135'/>
-
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='256' id='type-id-136'>
- <subrange length='32' type-id='type-id-12' id='type-id-151'/>
-
- </array-type-def>
- <qualified-type-def type-id='type-id-134' const='yes' id='type-id-152'/>
- <pointer-type-def type-id='type-id-152' size-in-bits='64' id='type-id-153'/>
- <pointer-type-def type-id='type-id-133' size-in-bits='64' id='type-id-154'/>
- <function-decl name='lio_listio' mangled-name='lio_listio64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-153'/>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-154'/>
+ <typedef-decl name='refresh_config_func_t' type-id='type-id-97' id='type-id-98'/>
+ <pointer-type-def type-id='type-id-98' size-in-bits='64' id='type-id-95'/>
+ <typedef-decl name='pool_active_func_t' type-id='type-id-99' id='type-id-100'/>
+ <pointer-type-def type-id='type-id-100' size-in-bits='64' id='type-id-96'/>
+ <qualified-type-def type-id='type-id-94' const='yes' id='type-id-101'/>
+ <typedef-decl name='pool_config_ops_t' type-id='type-id-101' id='type-id-102'/>
+ <pointer-type-def type-id='type-id-102' size-in-bits='64' id='type-id-103'/>
+ <function-decl name='zpool_find_config' mangled-name='zpool_find_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_config'>
+ <parameter type-id='type-id-89' name='hdl'/>
+ <parameter type-id='type-id-4' name='target'/>
+ <parameter type-id='type-id-16' name='configp'/>
+ <parameter type-id='type-id-93' name='args'/>
+ <parameter type-id='type-id-103' name='pco'/>
<return type-id='type-id-1'/>
</function-decl>
- <qualified-type-def type-id='type-id-131' const='yes' id='type-id-155'/>
- <pointer-type-def type-id='type-id-155' size-in-bits='64' id='type-id-156'/>
- <function-decl name='aio_error' mangled-name='aio_error64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-156'/>
+ <function-decl name='zpool_search_import' mangled-name='zpool_search_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_search_import'>
+ <parameter type-id='type-id-89' name='hdl'/>
+ <parameter type-id='type-id-93' name='import'/>
+ <parameter type-id='type-id-103' name='pco'/>
+ <return type-id='type-id-15'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-104'/>
+ <function-decl name='zpool_read_label' mangled-name='zpool_read_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_read_label'>
+ <parameter type-id='type-id-1' name='fd'/>
+ <parameter type-id='type-id-16' name='config'/>
+ <parameter type-id='type-id-104' name='num_labels'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='aio_return' mangled-name='aio_return64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-134'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='pread64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-5'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <pointer-type-def type-id='type-id-74' size-in-bits='64' id='type-id-157'/>
- <pointer-type-def type-id='type-id-6' size-in-bits='64' id='type-id-158'/>
- <function-decl name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-157'/>
- <parameter type-id='type-id-158'/>
+ <function-decl name='nvpair_value_nvlist' mangled-name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strtoull' mangled-name='strtoull' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strpbrk' mangled-name='strpbrk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__fxstat64' mangled-name='__fxstat64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='calloc' mangled-name='calloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_nvlist' mangled-name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='pthread_mutex_init' mangled-name='pthread_mutex_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='pthread_mutex_destroy' mangled-name='pthread_mutex_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_nvlist' mangled-name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='dcgettext' mangled-name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strerror' mangled-name='strerror' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__realpath_chk' mangled-name='__realpath_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strndup' mangled-name='strndup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zfs_basename' mangled-name='zfs_basename' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zfs_dirnamelen' mangled-name='zfs_dirnamelen' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__xstat' mangled-name='__xstat64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zpool_find_import_blkid' mangled-name='zpool_find_import_blkid' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_empty' mangled-name='nvlist_empty' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='geteuid' mangled-name='geteuid' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_nvlist_array' mangled-name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='opendir' mangled-name='opendir' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='readdir64' mangled-name='readdir64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='closedir' mangled-name='closedir' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__asprintf_chk' mangled-name='__asprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='ioctl' mangled-name='ioctl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__pread64_alias' mangled-name='pread64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='posix_memalign' mangled-name='posix_memalign' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='aio_error' mangled-name='aio_error64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='aio_return' mangled-name='aio_return64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='lio_listio' mangled-name='lio_listio64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='exit' mangled-name='exit' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__builtin___vsnprintf_chk' mangled-name='__vsnprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__fprintf_chk' mangled-name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='update_vdev_config_dev_strs' mangled-name='update_vdev_config_dev_strs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strncmp' mangled-name='strncmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_remove' mangled-name='nvlist_remove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint64_array' mangled-name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='sysconf' mangled-name='sysconf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='tpool_create' mangled-name='tpool_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='tpool_dispatch' mangled-name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='tpool_wait' mangled-name='tpool_wait' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='tpool_destroy' mangled-name='tpool_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='type-id-99'>
+ <parameter type-id='type-id-89'/>
+ <parameter type-id='type-id-4'/>
+ <parameter type-id='type-id-8'/>
+ <parameter type-id='type-id-24'/>
<return type-id='type-id-1'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-97'>
+ <parameter type-id='type-id-89'/>
+ <parameter type-id='type-id-15'/>
+ <return type-id='type-id-15'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='zutil_nicenum.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
+ <function-decl name='zfs_nicebytes' mangled-name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicebytes'>
+ <parameter type-id='type-id-8' name='num'/>
+ <parameter type-id='type-id-36' name='buf'/>
+ <parameter type-id='type-id-85' name='buflen'/>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zfs_niceraw' mangled-name='zfs_niceraw' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_niceraw'>
+ <parameter type-id='type-id-8' name='num'/>
+ <parameter type-id='type-id-36' name='buf'/>
+ <parameter type-id='type-id-85' name='buflen'/>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zfs_nicetime' mangled-name='zfs_nicetime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicetime'>
+ <parameter type-id='type-id-8' name='num'/>
+ <parameter type-id='type-id-36' name='buf'/>
+ <parameter type-id='type-id-85' name='buflen'/>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zfs_nicenum' mangled-name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum'>
+ <parameter type-id='type-id-8' name='num'/>
+ <parameter type-id='type-id-36' name='buf'/>
+ <parameter type-id='type-id-85' name='buflen'/>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <enum-decl name='zfs_nicenum_format' id='type-id-105'>
+ <underlying-type type-id='type-id-19'/>
+ <enumerator name='ZFS_NICENUM_1024' value='0'/>
+ <enumerator name='ZFS_NICENUM_BYTES' value='1'/>
+ <enumerator name='ZFS_NICENUM_TIME' value='2'/>
+ <enumerator name='ZFS_NICENUM_RAW' value='3'/>
+ <enumerator name='ZFS_NICENUM_RAWTIME' value='4'/>
+ </enum-decl>
+ <function-decl name='zfs_nicenum_format' mangled-name='zfs_nicenum_format' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum_format'>
+ <parameter type-id='type-id-8' name='num'/>
+ <parameter type-id='type-id-36' name='buf'/>
+ <parameter type-id='type-id-85' name='buflen'/>
+ <parameter type-id='type-id-105' name='format'/>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zfs_isnumber' mangled-name='zfs_isnumber' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_isnumber'>
+ <parameter type-id='type-id-4' name='str'/>
+ <return type-id='type-id-23'/>
+ </function-decl>
+ <function-decl name='powl' mangled-name='powl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__builtin_snprintf' mangled-name='snprintf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-126'/>
+ <function-decl name='__ctype_b_loc' mangled-name='__ctype_b_loc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ </abi-instr>
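(Illustrative aside, not part of the diff: the zutil_nicenum.c section above records the public zfs_nicenum family together with the zfs_nicenum_format enum. A minimal sketch of how these libzutil helpers can be called, assuming <libzutil.h> is on the include path and the program links against libzutil:)

#include <stdio.h>
#include <libzutil.h>

int
main(void)
{
	char buf[32];

	/* Human-readable, 1024-based size string. */
	zfs_nicebytes(1ULL << 30, buf, sizeof (buf));
	(void) printf("nicebytes: %s\n", buf);

	/* Explicit format selection via the enum recorded in the ABI dump. */
	zfs_nicenum_format(1ULL << 30, buf, sizeof (buf), ZFS_NICENUM_RAW);
	(void) printf("raw:       %s\n", buf);

	/* zfs_isnumber() reports whether a string is purely numeric. */
	(void) printf("isnumber:  %d\n", (int)zfs_isnumber("12345"));

	return (0);
}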
+ <abi-instr version='1.0' address-size='64' path='zutil_pool.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
+ <pointer-type-def type-id='type-id-16' size-in-bits='64' id='type-id-106'/>
+ <pointer-type-def type-id='type-id-35' size-in-bits='64' id='type-id-107'/>
+ <function-decl name='zpool_history_unpack' mangled-name='zpool_history_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_history_unpack'>
+ <parameter type-id='type-id-36' name='buf'/>
+ <parameter type-id='type-id-8' name='bytes_read'/>
+ <parameter type-id='type-id-81' name='leftover'/>
+ <parameter type-id='type-id-106' name='records'/>
+ <parameter type-id='type-id-107' name='numrecords'/>
<return type-id='type-id-1'/>
</function-decl>
- <class-decl name='importargs' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-159'>
+ <class-decl name='ddt_stat' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-108'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='path' type-id='type-id-126' visibility='default'/>
+ <var-decl name='dds_blocks' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='paths' type-id='type-id-1' visibility='default'/>
+ <var-decl name='dds_lsize' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='poolname' type-id='type-id-16' visibility='default'/>
+ <var-decl name='dds_psize' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='guid' type-id='type-id-23' visibility='default'/>
+ <var-decl name='dds_dsize' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='cachefile' type-id='type-id-16' visibility='default'/>
+ <var-decl name='dds_ref_blocks' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='can_be_active' type-id='type-id-41' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='scan' type-id='type-id-41' visibility='default'/>
+ <var-decl name='dds_ref_lsize' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='policy' type-id='type-id-29' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='importargs_t' type-id='type-id-159' id='type-id-160'/>
- <pointer-type-def type-id='type-id-160' size-in-bits='64' id='type-id-161'/>
- <class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-162'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pco_refresh_config' type-id='type-id-163' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pco_pool_active' type-id='type-id-164' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='refresh_config_func_t' type-id='type-id-165' id='type-id-166'/>
- <pointer-type-def type-id='type-id-166' size-in-bits='64' id='type-id-163'/>
- <typedef-decl name='pool_active_func_t' type-id='type-id-167' id='type-id-168'/>
- <pointer-type-def type-id='type-id-168' size-in-bits='64' id='type-id-164'/>
- <qualified-type-def type-id='type-id-162' const='yes' id='type-id-169'/>
- <typedef-decl name='pool_config_ops_t' type-id='type-id-169' id='type-id-170'/>
- <pointer-type-def type-id='type-id-170' size-in-bits='64' id='type-id-172'/>
- <function-decl name='zpool_search_import' mangled-name='zpool_search_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_search_import'>
- <parameter type-id='type-id-73' name='hdl'/>
- <parameter type-id='type-id-161' name='import'/>
- <parameter type-id='type-id-172' name='pco'/>
- <return type-id='type-id-29'/>
- </function-decl>
- <function-decl name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-37'/>
- </function-decl>
- <union-decl name='__anonymous_union__' size-in-bits='32' is-anonymous='yes' visibility='default' id='type-id-173'>
- <data-member access='private'>
- <var-decl name='__size' type-id='type-id-174' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__align' type-id='type-id-1' visibility='default'/>
- </data-member>
- </union-decl>
-
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='32' id='type-id-174'>
- <subrange length='4' type-id='type-id-12' id='type-id-92'/>
-
- </array-type-def>
- <qualified-type-def type-id='type-id-173' const='yes' id='type-id-175'/>
- <pointer-type-def type-id='type-id-175' size-in-bits='64' id='type-id-176'/>
- <function-decl name='pthread_mutex_init' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-14'/>
- <parameter type-id='type-id-176'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='libpc_handle' size-in-bits='8448' is-struct='yes' visibility='default' id='type-id-177'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='lpc_printerr' type-id='type-id-41' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='lpc_open_access_error' type-id='type-id-41' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='lpc_desc_active' type-id='type-id-41' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='lpc_desc' type-id='type-id-178' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8320'>
- <var-decl name='lpc_ops' type-id='type-id-172' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='8384'>
- <var-decl name='lpc_lib_handle' type-id='type-id-73' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='8192' id='type-id-178'>
- <subrange length='1024' type-id='type-id-12' id='type-id-179'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-177' size-in-bits='64' id='type-id-180'/>
- <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-181'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_root' type-id='type-id-182' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='avl_compar' type-id='type-id-183' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_offset' type-id='type-id-125' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='avl_numnodes' type-id='type-id-184' visibility='default'/>
+ <var-decl name='dds_ref_psize' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='avl_size' type-id='type-id-125' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='dds_ref_dsize' type-id='type-id-8' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-185'>
+ <typedef-decl name='ddt_stat_t' type-id='type-id-108' id='type-id-109'/>
+ <qualified-type-def type-id='type-id-109' const='yes' id='type-id-110'/>
+ <pointer-type-def type-id='type-id-110' size-in-bits='64' id='type-id-111'/>
+ <class-decl name='ddt_histogram' size-in-bits='32768' is-struct='yes' visibility='default' id='type-id-112'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_child' type-id='type-id-186' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_pcb' type-id='type-id-187' visibility='default'/>
+ <var-decl name='ddh_stat' type-id='type-id-113' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-185' size-in-bits='64' id='type-id-182'/>
- <array-type-def dimensions='1' type-id='type-id-182' size-in-bits='128' id='type-id-186'>
- <subrange length='2' type-id='type-id-12' id='type-id-62'/>
+ <array-type-def dimensions='1' type-id='type-id-109' size-in-bits='32768' id='type-id-113'>
+ <subrange length='64' type-id='type-id-12' id='type-id-114'/>
</array-type-def>
- <typedef-decl name='uintptr_t' type-id='type-id-26' id='type-id-187'/>
- <pointer-type-def type-id='type-id-188' size-in-bits='64' id='type-id-183'/>
- <typedef-decl name='ulong_t' type-id='type-id-26' id='type-id-184'/>
- <pointer-type-def type-id='type-id-181' size-in-bits='64' id='type-id-189'/>
- <pointer-type-def type-id='type-id-189' size-in-bits='64' id='type-id-190'/>
- <function-decl name='zpool_find_import_blkid' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-180'/>
- <parameter type-id='type-id-14'/>
- <parameter type-id='type-id-190'/>
- <return type-id='type-id-1'/>
+ <typedef-decl name='ddt_histogram_t' type-id='type-id-112' id='type-id-115'/>
+ <qualified-type-def type-id='type-id-115' const='yes' id='type-id-116'/>
+ <pointer-type-def type-id='type-id-116' size-in-bits='64' id='type-id-117'/>
+ <function-decl name='zpool_dump_ddt' mangled-name='zpool_dump_ddt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_dump_ddt'>
+ <parameter type-id='type-id-111' name='dds_total'/>
+ <parameter type-id='type-id-117' name='ddh'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='avl_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-189'/>
- <parameter type-id='type-id-183'/>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-17'/>
+ <function-decl name='realloc' mangled-name='realloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='dirname' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <return type-id='type-id-37'/>
+ <function-decl name='__builtin_putchar' mangled-name='putchar' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='realpath' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-37'/>
- <return type-id='type-id-37'/>
+ <function-decl name='__builtin_puts' mangled-name='puts' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='pthread_mutex_destroy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-14'/>
- <return type-id='type-id-1'/>
+ <function-decl name='__printf_chk' mangled-name='__printf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-74'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_nicenum' mangled-name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-70'/>
- <return type-id='type-id-35'/>
+ <function-decl name='zfs_nicebytes' mangled-name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-35'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/zutil_device_path_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
+ <function-decl name='is_mpath_whole_disk' mangled-name='is_mpath_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mpath_whole_disk'>
+ <parameter type-id='type-id-4' name='path'/>
+ <return type-id='type-id-23'/>
</function-decl>
- <function-decl name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_underlying_path' mangled-name='zfs_get_underlying_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_path'>
+ <parameter type-id='type-id-4' name='dev_name'/>
+ <return type-id='type-id-36'/>
</function-decl>
- <function-decl name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_dev_is_whole_disk' mangled-name='zfs_dev_is_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_whole_disk'>
+ <parameter type-id='type-id-4' name='dev_name'/>
+ <return type-id='type-id-23'/>
</function-decl>
- <function-decl name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-189'/>
- <parameter type-id='type-id-130'/>
- <return type-id='type-id-73'/>
+ <function-decl name='zfs_dev_is_dm' mangled-name='zfs_dev_is_dm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_dm'>
+ <parameter type-id='type-id-4' name='dev_name'/>
+ <return type-id='type-id-23'/>
</function-decl>
- <function-decl name='nvlist_empty' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-51'/>
+ <function-decl name='zfs_get_enclosure_sysfs_path' mangled-name='zfs_get_enclosure_sysfs_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_enclosure_sysfs_path'>
+ <parameter type-id='type-id-4' name='dev_name'/>
+ <return type-id='type-id-36'/>
</function-decl>
- <function-decl name='geteuid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-6'/>
+ <function-decl name='zfs_strip_path' mangled-name='zfs_strip_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_path'>
+ <parameter type-id='type-id-36' name='path'/>
+ <return type-id='type-id-36'/>
</function-decl>
- <function-decl name='zpool_find_config' mangled-name='zpool_find_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_config'>
- <parameter type-id='type-id-73' name='hdl'/>
- <parameter type-id='type-id-16' name='target'/>
- <parameter type-id='type-id-64' name='configp'/>
- <parameter type-id='type-id-161' name='args'/>
- <parameter type-id='type-id-172' name='pco'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_strip_partition' mangled-name='zfs_strip_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_partition'>
+ <parameter type-id='type-id-36' name='path'/>
+ <return type-id='type-id-36'/>
</function-decl>
- <function-decl name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-70'/>
- <parameter type-id='type-id-74'/>
+ <function-decl name='zfs_append_partition' mangled-name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_append_partition'>
+ <parameter type-id='type-id-36' name='path'/>
+ <parameter type-id='type-id-85' name='max_len'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='sysconf' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-5'/>
+ <function-decl name='udev_device_get_property_value' mangled-name='udev_device_get_property_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <class-decl name='tpool' size-in-bits='2496' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-191'/>
- <pointer-type-def type-id='type-id-191' size-in-bits='64' id='type-id-192'/>
- <function-decl name='tpool_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-145'/>
- <return type-id='type-id-192'/>
+ <function-decl name='udev_new' mangled-name='udev_new' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='avl_first' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-189'/>
- <return type-id='type-id-73'/>
+ <function-decl name='udev_device_new_from_subsystem_sysname' mangled-name='udev_device_new_from_subsystem_sysname' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <pointer-type-def type-id='type-id-193' size-in-bits='64' id='type-id-194'/>
- <function-decl name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-192'/>
- <parameter type-id='type-id-194'/>
- <parameter type-id='type-id-73'/>
- <return type-id='type-id-1'/>
+ <function-decl name='udev_device_unref' mangled-name='udev_device_unref' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='avl_walk' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-189'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-73'/>
+ <function-decl name='__realpath_alias' mangled-name='realpath' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='tpool_wait' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-192'/>
- <return type-id='type-id-17'/>
+ <function-decl name='efi_alloc_and_init' mangled-name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='tpool_destroy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-192'/>
- <return type-id='type-id-17'/>
+ <function-decl name='efi_free' mangled-name='efi_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='avl_destroy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-189'/>
- <return type-id='type-id-17'/>
+ <function-decl name='__readlink_alias' mangled-name='readlink' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='nvlist_remove' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-69'/>
- <return type-id='type-id-1'/>
+ <function-decl name='strstr' mangled-name='strstr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-1'/>
+ </abi-instr>
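(Illustrative aside, not part of the diff: the os/linux/zutil_device_path_os.c section above covers device-path helpers such as zfs_append_partition() and zfs_strip_partition(). A hedged sketch of their use, under the assumption that zfs_strip_partition() returns a freshly allocated string the caller must free:)

#include <stdio.h>
#include <stdlib.h>
#include <libzutil.h>

int
main(void)
{
	char dev[64] = "/dev/sda";	/* example whole-disk name */
	char *stripped;

	/* Append the partition suffix a whole disk would receive. */
	if (zfs_append_partition(dev, sizeof (dev)) != -1)
		(void) printf("partition:  %s\n", dev);

	/* Strip it back off; assumed to return an allocated copy. */
	stripped = zfs_strip_partition(dev);
	if (stripped != NULL) {
		(void) printf("whole disk: %s\n", stripped);
		free(stripped);
	}
	return (0);
}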
+ <abi-instr version='1.0' address-size='64' path='os/linux/zutil_import_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
+ <function-decl name='update_vdev_config_dev_strs' mangled-name='update_vdev_config_dev_strs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='update_vdev_config_dev_strs'>
+ <parameter type-id='type-id-15' name='nv'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <pointer-type-def type-id='type-id-38' size-in-bits='64' id='type-id-195'/>
- <function-decl name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-195'/>
- <parameter type-id='type-id-158'/>
+ <function-decl name='zpool_label_disk_wait' mangled-name='zpool_label_disk_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk_wait'>
+ <parameter type-id='type-id-4' name='path'/>
+ <parameter type-id='type-id-1' name='timeout_ms'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-38'/>
- <parameter type-id='type-id-6'/>
+ <class-decl name='udev_device' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-118'/>
+ <pointer-type-def type-id='type-id-118' size-in-bits='64' id='type-id-119'/>
+ <function-decl name='zfs_device_get_devid' mangled-name='zfs_device_get_devid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_devid'>
+ <parameter type-id='type-id-119' name='dev'/>
+ <parameter type-id='type-id-36' name='bufptr'/>
+ <parameter type-id='type-id-85' name='buflen'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-74'/>
- <parameter type-id='type-id-1'/>
+ <qualified-type-def type-id='type-id-4' const='yes' id='type-id-120'/>
+ <pointer-type-def type-id='type-id-120' size-in-bits='64' id='type-id-121'/>
+ <pointer-type-def type-id='type-id-85' size-in-bits='64' id='type-id-122'/>
+ <function-decl name='zpool_default_search_paths' mangled-name='zpool_default_search_paths' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_default_search_paths'>
+ <parameter type-id='type-id-122' name='count'/>
+ <return type-id='type-id-121'/>
+ </function-decl>
+ <function-decl name='zfs_dev_flush' mangled-name='zfs_dev_flush' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_flush'>
+ <parameter type-id='type-id-1' name='fd'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-74'/>
- <parameter type-id='type-id-6'/>
+ <function-decl name='zfs_device_get_physical' mangled-name='zfs_device_get_physical' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_physical'>
+ <parameter type-id='type-id-119' name='dev'/>
+ <parameter type-id='type-id-36' name='bufptr'/>
+ <parameter type-id='type-id-85' name='buflen'/>
<return type-id='type-id-1'/>
</function-decl>
- <class-decl name='__dirstream' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-196'/>
- <pointer-type-def type-id='type-id-196' size-in-bits='64' id='type-id-197'/>
- <function-decl name='opendir' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-197'/>
+ <function-decl name='strtoul' mangled-name='strtoul' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <class-decl name='dirent64' size-in-bits='2240' is-struct='yes' visibility='default' id='type-id-198'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='d_ino' type-id='type-id-199' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='d_off' type-id='type-id-135' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='d_reclen' type-id='type-id-200' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='144'>
- <var-decl name='d_type' type-id='type-id-30' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='152'>
- <var-decl name='d_name' type-id='type-id-43' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__ino64_t' type-id='type-id-26' id='type-id-199'/>
- <type-decl name='unsigned short int' size-in-bits='16' id='type-id-200'/>
- <pointer-type-def type-id='type-id-198' size-in-bits='64' id='type-id-201'/>
- <function-decl name='readdir64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-197'/>
- <return type-id='type-id-201'/>
- </function-decl>
- <function-decl name='closedir' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-197'/>
- <return type-id='type-id-1'/>
+ <function-decl name='strncasecmp' mangled-name='strncasecmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='asprintf' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-16'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
+ <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='avl_find' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-189'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-38'/>
- <return type-id='type-id-73'/>
- </function-decl>
- <function-decl name='avl_insert' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-189'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='update_vdev_config_dev_strs' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-167'>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-23'/>
- <parameter type-id='type-id-118'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-188'>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-73'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-165'>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-29'/>
- <return type-id='type-id-29'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-146'>
- <parameter type-id='type-id-138'/>
- <return type-id='type-id-17'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-193'>
- <parameter type-id='type-id-73'/>
- <return type-id='type-id-17'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='zutil_nicenum.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzutil' language='LANG_C99'>
- <function-decl name='zfs_isnumber' mangled-name='zfs_isnumber' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_isnumber'>
- <parameter type-id='type-id-16' name='str'/>
- <return type-id='type-id-41'/>
+ <function-decl name='zfs_get_underlying_path' mangled-name='zfs_get_underlying_path' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <enum-decl name='zfs_nicenum_format' id='type-id-202'>
- <underlying-type type-id='type-id-18'/>
- <enumerator name='ZFS_NICENUM_1024' value='0'/>
- <enumerator name='ZFS_NICENUM_BYTES' value='1'/>
- <enumerator name='ZFS_NICENUM_TIME' value='2'/>
- <enumerator name='ZFS_NICENUM_RAW' value='3'/>
- <enumerator name='ZFS_NICENUM_RAWTIME' value='4'/>
- </enum-decl>
- <function-decl name='zfs_nicenum_format' mangled-name='zfs_nicenum_format' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum_format'>
- <parameter type-id='type-id-23' name='num'/>
- <parameter type-id='type-id-37' name='buf'/>
- <parameter type-id='type-id-125' name='buflen'/>
- <parameter type-id='type-id-202' name='format'/>
- <return type-id='type-id-17'/>
+ <function-decl name='zfs_get_enclosure_sysfs_path' mangled-name='zfs_get_enclosure_sysfs_path' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_nicenum' mangled-name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum'>
- <parameter type-id='type-id-23' name='num'/>
- <parameter type-id='type-id-37' name='buf'/>
- <parameter type-id='type-id-125' name='buflen'/>
- <return type-id='type-id-17'/>
+ <function-decl name='clock_gettime' mangled-name='clock_gettime' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_nicetime' mangled-name='zfs_nicetime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicetime'>
- <parameter type-id='type-id-23' name='num'/>
- <parameter type-id='type-id-37' name='buf'/>
- <parameter type-id='type-id-125' name='buflen'/>
- <return type-id='type-id-17'/>
+ <function-decl name='sched_yield' mangled-name='sched_yield' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_niceraw' mangled-name='zfs_niceraw' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_niceraw'>
- <parameter type-id='type-id-23' name='num'/>
- <parameter type-id='type-id-37' name='buf'/>
- <parameter type-id='type-id-125' name='buflen'/>
- <return type-id='type-id-17'/>
+ <function-decl name='usleep' mangled-name='usleep' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='zfs_nicebytes' mangled-name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicebytes'>
- <parameter type-id='type-id-23' name='num'/>
- <parameter type-id='type-id-37' name='buf'/>
- <parameter type-id='type-id-125' name='buflen'/>
- <return type-id='type-id-17'/>
+ <function-decl name='udev_unref' mangled-name='udev_unref' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='udev_list_entry_get_name' mangled-name='udev_list_entry_get_name' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='udev_device_get_devlinks_list_entry' mangled-name='udev_device_get_devlinks_list_entry' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='udev_list_entry_get_next' mangled-name='udev_list_entry_get_next' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='udev_device_get_parent_with_subsystem_devtype' mangled-name='udev_device_get_parent_with_subsystem_devtype' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='blkid_get_cache' mangled-name='blkid_get_cache' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='blkid_probe_all_new' mangled-name='blkid_probe_all_new' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='blkid_dev_iterate_begin' mangled-name='blkid_dev_iterate_begin' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='blkid_dev_set_search' mangled-name='blkid_dev_set_search' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zutil_alloc' mangled-name='zutil_alloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='blkid_dev_next' mangled-name='blkid_dev_next' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='blkid_dev_devname' mangled-name='blkid_dev_devname' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zutil_strdup' mangled-name='zutil_strdup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='blkid_dev_iterate_end' mangled-name='blkid_dev_iterate_end' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='blkid_put_cache' mangled-name='blkid_put_cache' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='label_paths' mangled-name='label_paths' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='sscanf' mangled-name='sscanf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='zpool_read_label' mangled-name='zpool_read_label' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
</abi-instr>
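(Illustrative aside, not part of the diff: the os/linux/zutil_import_os.c section above includes zpool_default_search_paths(), which returns the device directories scanned during pool import. A minimal, self-contained sketch, assuming <libzutil.h> and linking against libzutil:)

#include <stdio.h>
#include <libzutil.h>

int
main(void)
{
	size_t count, i;
	const char * const *dirs = zpool_default_search_paths(&count);

	/* Print the default device directories used by pool import. */
	for (i = 0; i < count; i++)
		(void) printf("%s\n", dirs[i]);
	return (0);
}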
- <abi-instr version='1.0' address-size='64' path='zutil_pool.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzutil' language='LANG_C99'>
- <class-decl name='ddt_stat' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-203'>
+ <abi-instr version='1.0' address-size='64' path='os/linux/zutil_compat.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
+ <class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='type-id-123'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='dds_blocks' type-id='type-id-23' visibility='default'/>
+ <var-decl name='zc_name' type-id='type-id-124' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='dds_lsize' type-id='type-id-23' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32768'>
+ <var-decl name='zc_nvlist_src' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='dds_psize' type-id='type-id-23' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32832'>
+ <var-decl name='zc_nvlist_src_size' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='dds_dsize' type-id='type-id-23' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32896'>
+ <var-decl name='zc_nvlist_dst' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='dds_ref_blocks' type-id='type-id-23' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32960'>
+ <var-decl name='zc_nvlist_dst_size' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='dds_ref_lsize' type-id='type-id-23' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33024'>
+ <var-decl name='zc_nvlist_dst_filled' type-id='type-id-23' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='dds_ref_psize' type-id='type-id-23' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33056'>
+ <var-decl name='zc_pad2' type-id='type-id-1' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='dds_ref_dsize' type-id='type-id-23' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33088'>
+ <var-decl name='zc_history' type-id='type-id-8' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='ddt_stat_t' type-id='type-id-203' id='type-id-204'/>
- <qualified-type-def type-id='type-id-204' const='yes' id='type-id-205'/>
- <pointer-type-def type-id='type-id-205' size-in-bits='64' id='type-id-206'/>
- <class-decl name='ddt_histogram' size-in-bits='32768' is-struct='yes' visibility='default' id='type-id-207'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ddh_stat' type-id='type-id-208' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33152'>
+ <var-decl name='zc_value' type-id='type-id-125' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='98688'>
+ <var-decl name='zc_string' type-id='type-id-53' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='100736'>
+ <var-decl name='zc_guid' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='100800'>
+ <var-decl name='zc_nvlist_conf' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='100864'>
+ <var-decl name='zc_nvlist_conf_size' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='100928'>
+ <var-decl name='zc_cookie' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='100992'>
+ <var-decl name='zc_objset_type' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101056'>
+ <var-decl name='zc_perm_action' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101120'>
+ <var-decl name='zc_history_len' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101184'>
+ <var-decl name='zc_history_offset' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101248'>
+ <var-decl name='zc_obj' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101312'>
+ <var-decl name='zc_iflags' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101376'>
+ <var-decl name='zc_share' type-id='type-id-126' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101632'>
+ <var-decl name='zc_objset_stats' type-id='type-id-127' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='103936'>
+ <var-decl name='zc_begin_record' type-id='type-id-40' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='106368'>
+ <var-decl name='zc_inject_record' type-id='type-id-128' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109184'>
+ <var-decl name='zc_defer_destroy' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109216'>
+ <var-decl name='zc_flags' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109248'>
+ <var-decl name='zc_action_handle' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109312'>
+ <var-decl name='zc_cleanup_fd' type-id='type-id-1' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109344'>
+ <var-decl name='zc_simple' type-id='type-id-33' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109352'>
+ <var-decl name='zc_pad' type-id='type-id-74' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109376'>
+ <var-decl name='zc_sendobj' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109440'>
+ <var-decl name='zc_fromobj' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109504'>
+ <var-decl name='zc_createtxg' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109568'>
+ <var-decl name='zc_stat' type-id='type-id-129' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109888'>
+ <var-decl name='zc_zoneid' type-id='type-id-8' visibility='default'/>
</data-member>
</class-decl>
- <array-type-def dimensions='1' type-id='type-id-204' size-in-bits='32768' id='type-id-208'>
- <subrange length='64' type-id='type-id-12' id='type-id-209'/>
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='32768' id='type-id-124'>
+ <subrange length='4096' type-id='type-id-12' id='type-id-130'/>
</array-type-def>
- <typedef-decl name='ddt_histogram_t' type-id='type-id-207' id='type-id-210'/>
- <qualified-type-def type-id='type-id-210' const='yes' id='type-id-211'/>
- <pointer-type-def type-id='type-id-211' size-in-bits='64' id='type-id-212'/>
- <function-decl name='zpool_dump_ddt' mangled-name='zpool_dump_ddt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_dump_ddt'>
- <parameter type-id='type-id-206' name='dds_total'/>
- <parameter type-id='type-id-212' name='ddh'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <pointer-type-def type-id='type-id-64' size-in-bits='64' id='type-id-213'/>
- <pointer-type-def type-id='type-id-34' size-in-bits='64' id='type-id-214'/>
- <function-decl name='zpool_history_unpack' mangled-name='zpool_history_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_history_unpack'>
- <parameter type-id='type-id-37' name='buf'/>
- <parameter type-id='type-id-23' name='bytes_read'/>
- <parameter type-id='type-id-71' name='leftover'/>
- <parameter type-id='type-id-213' name='records'/>
- <parameter type-id='type-id-214' name='numrecords'/>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/zutil_device_path_os.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzutil' language='LANG_C99'>
- <function-decl name='zfs_append_partition' mangled-name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_append_partition'>
- <parameter type-id='type-id-37' name='path'/>
- <parameter type-id='type-id-125' name='max_len'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_strip_partition' mangled-name='zfs_strip_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_partition'>
- <parameter type-id='type-id-37' name='path'/>
- <return type-id='type-id-37'/>
- </function-decl>
- <function-decl name='zfs_strip_path' mangled-name='zfs_strip_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_path'>
- <parameter type-id='type-id-37'/>
- <return type-id='type-id-37'/>
- </function-decl>
- <function-decl name='zfs_get_enclosure_sysfs_path' mangled-name='zfs_get_enclosure_sysfs_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_enclosure_sysfs_path'>
- <parameter type-id='type-id-16' name='dev_name'/>
- <return type-id='type-id-37'/>
- </function-decl>
- <class-decl name='dirent' size-in-bits='2240' is-struct='yes' visibility='default' id='type-id-215'>
+
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='65536' id='type-id-125'>
+ <subrange length='8192' type-id='type-id-12' id='type-id-131'/>
+
+ </array-type-def>
+ <class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-132'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='z_exportdata' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='z_sharedata' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='z_sharetype' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='z_sharemax' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='zfs_share_t' type-id='type-id-132' id='type-id-126'/>
+ <class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='type-id-133'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='d_ino' type-id='type-id-199' visibility='default'/>
+ <var-decl name='dds_num_clones' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='d_off' type-id='type-id-135' visibility='default'/>
+ <var-decl name='dds_creation_txg' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='d_reclen' type-id='type-id-200' visibility='default'/>
+ <var-decl name='dds_guid' type-id='type-id-8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='dds_type' type-id='type-id-52' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='dds_is_snapshot' type-id='type-id-33' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='232'>
+ <var-decl name='dds_inconsistent' type-id='type-id-33' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='144'>
- <var-decl name='d_type' type-id='type-id-30' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='240'>
+ <var-decl name='dds_redacted' type-id='type-id-33' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='152'>
- <var-decl name='d_name' type-id='type-id-43' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='248'>
+ <var-decl name='dds_origin' type-id='type-id-53' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-215' size-in-bits='64' id='type-id-216'/>
- <function-decl name='readdir' mangled-name='readdir64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-197'/>
- <return type-id='type-id-216'/>
- </function-decl>
- <function-decl name='readlink' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='zfs_dev_is_dm' mangled-name='zfs_dev_is_dm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_dm'>
- <parameter type-id='type-id-16' name='dev_name'/>
- <return type-id='type-id-41'/>
- </function-decl>
- <function-decl name='zfs_dev_is_whole_disk' mangled-name='zfs_dev_is_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_whole_disk'>
- <parameter type-id='type-id-16' name='dev_name'/>
- <return type-id='type-id-41'/>
- </function-decl>
- <class-decl name='dk_gpt' size-in-bits='1920' is-struct='yes' visibility='default' id='type-id-217'>
+ <typedef-decl name='dmu_objset_stats_t' type-id='type-id-133' id='type-id-127'/>
+ <class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='type-id-134'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='efi_version' type-id='type-id-34' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='efi_nparts' type-id='type-id-34' visibility='default'/>
+ <var-decl name='zi_objset' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='efi_part_size' type-id='type-id-34' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='efi_lbasize' type-id='type-id-34' visibility='default'/>
+ <var-decl name='zi_object' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='efi_last_lba' type-id='type-id-218' visibility='default'/>
+ <var-decl name='zi_start' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='efi_first_u_lba' type-id='type-id-218' visibility='default'/>
+ <var-decl name='zi_end' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='efi_last_u_lba' type-id='type-id-218' visibility='default'/>
+ <var-decl name='zi_guid' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='efi_disk_uguid' type-id='type-id-219' visibility='default'/>
+ <var-decl name='zi_level' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='zi_error' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='zi_type' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='efi_flags' type-id='type-id-34' visibility='default'/>
+ <var-decl name='zi_freq' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='efi_reserved1' type-id='type-id-34' visibility='default'/>
+ <var-decl name='zi_failfast' type-id='type-id-7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='efi_altern_lba' type-id='type-id-218' visibility='default'/>
+ <var-decl name='zi_func' type-id='type-id-53' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='efi_reserved' type-id='type-id-220' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2560'>
+ <var-decl name='zi_iotype' type-id='type-id-7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='efi_parts' type-id='type-id-221' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2592'>
+ <var-decl name='zi_duration' type-id='type-id-6' visibility='default'/>
</data-member>
- </class-decl>
- <type-decl name='long long int' size-in-bits='64' id='type-id-222'/>
- <typedef-decl name='longlong_t' type-id='type-id-222' id='type-id-223'/>
- <typedef-decl name='diskaddr_t' type-id='type-id-223' id='type-id-218'/>
- <class-decl name='uuid' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-219'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='time_low' type-id='type-id-22' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2624'>
+ <var-decl name='zi_timer' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='time_mid' type-id='type-id-224' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2688'>
+ <var-decl name='zi_nlanes' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='48'>
- <var-decl name='time_hi_and_version' type-id='type-id-224' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2752'>
+ <var-decl name='zi_cmd' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2784'>
+ <var-decl name='zi_dvas' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='zinject_record_t' type-id='type-id-134' id='type-id-128'/>
+ <class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-135'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='zs_gen' type-id='type-id-8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='clock_seq_hi_and_reserved' type-id='type-id-32' visibility='default'/>
+ <var-decl name='zs_mode' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='72'>
- <var-decl name='clock_seq_low' type-id='type-id-32' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='zs_links' type-id='type-id-8' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='80'>
- <var-decl name='node_addr' type-id='type-id-105' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='zs_ctime' type-id='type-id-136' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__uint16_t' type-id='type-id-200' id='type-id-225'/>
- <typedef-decl name='uint16_t' type-id='type-id-225' id='type-id-224'/>
- <array-type-def dimensions='1' type-id='type-id-34' size-in-bits='384' id='type-id-220'>
- <subrange length='12' type-id='type-id-12' id='type-id-103'/>
+ <array-type-def dimensions='1' type-id='type-id-8' size-in-bits='128' id='type-id-136'>
+ <subrange length='2' type-id='type-id-12' id='type-id-137'/>
</array-type-def>
- <class-decl name='dk_part' size-in-bits='960' is-struct='yes' visibility='default' id='type-id-226'>
+ <typedef-decl name='zfs_stat_t' type-id='type-id-135' id='type-id-129'/>
+ <typedef-decl name='zfs_cmd_t' type-id='type-id-123' id='type-id-138'/>
+ <pointer-type-def type-id='type-id-138' size-in-bits='64' id='type-id-139'/>
+ <function-decl name='zfs_ioctl_fd' mangled-name='zfs_ioctl_fd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_ioctl_fd'>
+ <parameter type-id='type-id-1' name='fd'/>
+ <parameter type-id='type-id-12' name='request'/>
+ <parameter type-id='type-id-139' name='zc'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/avl/avl.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libavl' language='LANG_C99'>
+ <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-140'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='p_start' type-id='type-id-218' visibility='default'/>
+ <var-decl name='avl_root' type-id='type-id-141' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='p_size' type-id='type-id-218' visibility='default'/>
+ <var-decl name='avl_compar' type-id='type-id-142' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='p_guid' type-id='type-id-219' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='p_tag' type-id='type-id-227' visibility='default'/>
+ <var-decl name='avl_offset' type-id='type-id-85' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='272'>
- <var-decl name='p_flag' type-id='type-id-227' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='avl_numnodes' type-id='type-id-143' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='p_name' type-id='type-id-228' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='avl_pad' type-id='type-id-85' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='p_uguid' type-id='type-id-219' visibility='default'/>
+ </class-decl>
+ <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-144'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='avl_child' type-id='type-id-145' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='p_resv' type-id='type-id-229' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='avl_pcb' type-id='type-id-146' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='ushort_t' type-id='type-id-200' id='type-id-227'/>
-
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='288' id='type-id-228'>
- <subrange length='36' type-id='type-id-12' id='type-id-230'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-34' size-in-bits='256' id='type-id-229'>
- <subrange length='8' type-id='type-id-12' id='type-id-102'/>
+ <pointer-type-def type-id='type-id-144' size-in-bits='64' id='type-id-141'/>
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-226' size-in-bits='960' id='type-id-221'>
- <subrange length='1' type-id='type-id-12' id='type-id-231'/>
+ <array-type-def dimensions='1' type-id='type-id-141' size-in-bits='128' id='type-id-145'>
+ <subrange length='2' type-id='type-id-12' id='type-id-137'/>
</array-type-def>
- <pointer-type-def type-id='type-id-217' size-in-bits='64' id='type-id-232'/>
- <pointer-type-def type-id='type-id-232' size-in-bits='64' id='type-id-233'/>
- <function-decl name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-233'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='efi_free' mangled-name='efi_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_free'>
- <parameter type-id='type-id-232'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='zfs_get_underlying_path' mangled-name='zfs_get_underlying_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_path'>
- <parameter type-id='type-id-16' name='dev_name'/>
- <return type-id='type-id-37'/>
- </function-decl>
- <function-decl name='is_mpath_whole_disk' mangled-name='is_mpath_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mpath_whole_disk'>
- <parameter type-id='type-id-16' name='path'/>
- <return type-id='type-id-41'/>
- </function-decl>
- <class-decl name='udev' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-234'/>
- <pointer-type-def type-id='type-id-234' size-in-bits='64' id='type-id-235'/>
- <function-decl name='udev_new' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-235'/>
- </function-decl>
- <class-decl name='udev_device' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-236'/>
- <pointer-type-def type-id='type-id-236' size-in-bits='64' id='type-id-237'/>
- <function-decl name='udev_device_new_from_subsystem_sysname' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-235'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-237'/>
- </function-decl>
- <function-decl name='udev_device_get_property_value' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-237'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-16'/>
- </function-decl>
- <function-decl name='udev_device_unref' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-237'/>
- <return type-id='type-id-237'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/zutil_import_os.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzutil' language='LANG_C99'>
- <function-decl name='zfs_dev_flush' mangled-name='zfs_dev_flush' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_flush'>
- <parameter type-id='type-id-1' name='fd'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zutil_strdup' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-180'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-37'/>
- </function-decl>
- <function-decl name='zpool_read_label' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-74'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='label_paths' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-180'/>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-126'/>
- <parameter type-id='type-id-126'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_label_disk_wait' mangled-name='zpool_label_disk_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk_wait'>
- <parameter type-id='type-id-16' name='path'/>
- <parameter type-id='type-id-1' name='timeout_ms'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zutil_alloc' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-180'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-73'/>
- </function-decl>
- <class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-238'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='type-id-239' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='type-id-240' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__time_t' type-id='type-id-5' id='type-id-239'/>
- <typedef-decl name='__syscall_slong_t' type-id='type-id-5' id='type-id-240'/>
- <pointer-type-def type-id='type-id-238' size-in-bits='64' id='type-id-241'/>
- <function-decl name='clock_gettime' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-241'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='usleep' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='udev_list_entry' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-242'/>
- <pointer-type-def type-id='type-id-242' size-in-bits='64' id='type-id-243'/>
- <function-decl name='udev_device_get_devlinks_list_entry' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-237'/>
- <return type-id='type-id-243'/>
- </function-decl>
- <function-decl name='udev_list_entry_get_name' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-243'/>
- <return type-id='type-id-16'/>
- </function-decl>
- <function-decl name='udev_list_entry_get_next' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-243'/>
- <return type-id='type-id-243'/>
- </function-decl>
- <function-decl name='udev_unref' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-235'/>
- <return type-id='type-id-235'/>
- </function-decl>
- <pointer-type-def type-id='type-id-125' size-in-bits='64' id='type-id-244'/>
- <function-decl name='zpool_default_search_paths' mangled-name='zpool_default_search_paths' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_default_search_paths'>
- <parameter type-id='type-id-244' name='count'/>
- <return type-id='type-id-128'/>
- </function-decl>
- <class-decl name='blkid_struct_cache' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-245'/>
- <pointer-type-def type-id='type-id-245' size-in-bits='64' id='type-id-246'/>
- <pointer-type-def type-id='type-id-246' size-in-bits='64' id='type-id-247'/>
- <function-decl name='blkid_get_cache' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-247'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='blkid_probe_all_new' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-246'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='blkid_struct_dev_iterate' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-248'/>
- <pointer-type-def type-id='type-id-248' size-in-bits='64' id='type-id-249'/>
- <function-decl name='blkid_dev_iterate_begin' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-246'/>
- <return type-id='type-id-249'/>
- </function-decl>
- <function-decl name='blkid_dev_set_search' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-249'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='blkid_dev_iterate_end' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-249'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <class-decl name='blkid_struct_dev' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-250'/>
- <pointer-type-def type-id='type-id-250' size-in-bits='64' id='type-id-251'/>
- <pointer-type-def type-id='type-id-251' size-in-bits='64' id='type-id-252'/>
- <function-decl name='blkid_dev_next' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-249'/>
- <parameter type-id='type-id-252'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='blkid_put_cache' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-246'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='blkid_dev_devname' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-251'/>
- <return type-id='type-id-16'/>
- </function-decl>
- <function-decl name='zfs_device_get_devid' mangled-name='zfs_device_get_devid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_devid'>
- <parameter type-id='type-id-237' name='dev'/>
- <parameter type-id='type-id-37' name='bufptr'/>
- <parameter type-id='type-id-125' name='buflen'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='udev_device_get_parent_with_subsystem_devtype' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-237'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-237'/>
- </function-decl>
- <function-decl name='zfs_device_get_physical' mangled-name='zfs_device_get_physical' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_physical'>
- <parameter type-id='type-id-237' name='dev'/>
- <parameter type-id='type-id-37' name='bufptr'/>
- <parameter type-id='type-id-125' name='buflen'/>
- <return type-id='type-id-1'/>
+ <typedef-decl name='uintptr_t' type-id='type-id-12' id='type-id-146'/>
+ <pointer-type-def type-id='type-id-147' size-in-bits='64' id='type-id-142'/>
+ <typedef-decl name='ulong_t' type-id='type-id-12' id='type-id-143'/>
+ <typedef-decl name='avl_tree_t' type-id='type-id-140' id='type-id-148'/>
+ <pointer-type-def type-id='type-id-148' size-in-bits='64' id='type-id-149'/>
+ <pointer-type-def type-id='type-id-89' size-in-bits='64' id='type-id-150'/>
+ <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy_nodes'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <parameter type-id='type-id-150' name='cookie'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='update_vdev_config_dev_strs' mangled-name='update_vdev_config_dev_strs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='update_vdev_config_dev_strs'>
- <parameter type-id='type-id-29' name='nv'/>
- <return type-id='type-id-17'/>
+ <function-decl name='avl_is_empty' mangled-name='avl_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_is_empty'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <return type-id='type-id-23'/>
</function-decl>
- <function-decl name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-35'/>
- <parameter type-id='type-id-16'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_numnodes'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <return type-id='type-id-143'/>
</function-decl>
- <function-decl name='sched_yield' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <return type-id='type-id-84'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/zutil_compat.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libzutil' language='LANG_C99'>
- <typedef-decl name='zfs_cmd_t' type-id='type-id-39' id='type-id-253'/>
- <pointer-type-def type-id='type-id-253' size-in-bits='64' id='type-id-254'/>
- <function-decl name='zfs_ioctl_fd' mangled-name='zfs_ioctl_fd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_ioctl_fd'>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-26' name='request'/>
- <parameter type-id='type-id-254' name='zc'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_create'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <parameter type-id='type-id-142' name='compar'/>
+ <parameter type-id='type-id-85' name='size'/>
+ <parameter type-id='type-id-85' name='offset'/>
+ <return type-id='type-id-84'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/avl/avl.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libavl' language='LANG_C99'>
- <typedef-decl name='avl_tree_t' type-id='type-id-181' id='type-id-255'/>
- <pointer-type-def type-id='type-id-255' size-in-bits='64' id='type-id-256'/>
- <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_walk'>
- <parameter type-id='type-id-256' name='tree'/>
- <parameter type-id='type-id-73' name='oldnode'/>
- <parameter type-id='type-id-1' name='left'/>
- <return type-id='type-id-73'/>
+ <function-decl name='avl_swap' mangled-name='avl_swap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_swap'>
+ <parameter type-id='type-id-149' name='tree1'/>
+ <parameter type-id='type-id-149' name='tree2'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_first'>
- <parameter type-id='type-id-256' name='tree'/>
- <return type-id='type-id-73'/>
+ <function-decl name='avl_update' mangled-name='avl_update' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update'>
+ <parameter type-id='type-id-149' name='t'/>
+ <parameter type-id='type-id-89' name='obj'/>
+ <return type-id='type-id-23'/>
</function-decl>
- <function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_last'>
- <parameter type-id='type-id-256' name='tree'/>
- <return type-id='type-id-73'/>
+ <function-decl name='avl_update_gt' mangled-name='avl_update_gt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_gt'>
+ <parameter type-id='type-id-149' name='t'/>
+ <parameter type-id='type-id-89' name='obj'/>
+ <return type-id='type-id-23'/>
</function-decl>
- <typedef-decl name='avl_index_t' type-id='type-id-187' id='type-id-257'/>
- <function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_nearest'>
- <parameter type-id='type-id-256' name='tree'/>
- <parameter type-id='type-id-257' name='where'/>
- <parameter type-id='type-id-1' name='direction'/>
- <return type-id='type-id-73'/>
+ <function-decl name='avl_update_lt' mangled-name='avl_update_lt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_lt'>
+ <parameter type-id='type-id-149' name='t'/>
+ <parameter type-id='type-id-89' name='obj'/>
+ <return type-id='type-id-23'/>
</function-decl>
- <pointer-type-def type-id='type-id-257' size-in-bits='64' id='type-id-258'/>
- <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_find'>
- <parameter type-id='type-id-256' name='tree'/>
- <parameter type-id='type-id-73' name='value'/>
- <parameter type-id='type-id-258' name='where'/>
- <return type-id='type-id-73'/>
+ <function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_remove'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <parameter type-id='type-id-89' name='data'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert'>
- <parameter type-id='type-id-256' name='tree'/>
- <parameter type-id='type-id-73' name='new_data'/>
- <parameter type-id='type-id-257' name='where'/>
- <return type-id='type-id-17'/>
+ <function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_add'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <parameter type-id='type-id-89' name='new_node'/>
+ <return type-id='type-id-84'/>
</function-decl>
<function-decl name='avl_insert_here' mangled-name='avl_insert_here' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert_here'>
- <parameter type-id='type-id-256' name='tree'/>
- <parameter type-id='type-id-73' name='new_data'/>
- <parameter type-id='type-id-73' name='here'/>
+ <parameter type-id='type-id-149' name='tree'/>
+ <parameter type-id='type-id-89' name='new_data'/>
+ <parameter type-id='type-id-89' name='here'/>
<parameter type-id='type-id-1' name='direction'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_add'>
- <parameter type-id='type-id-256' name='tree'/>
- <parameter type-id='type-id-73' name='new_node'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_remove'>
- <parameter type-id='type-id-256' name='tree'/>
- <parameter type-id='type-id-73' name='data'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='avl_update_lt' mangled-name='avl_update_lt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_lt'>
- <parameter type-id='type-id-256' name='t'/>
- <parameter type-id='type-id-73' name='obj'/>
- <return type-id='type-id-41'/>
- </function-decl>
- <function-decl name='avl_update_gt' mangled-name='avl_update_gt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_gt'>
- <parameter type-id='type-id-256' name='t'/>
- <parameter type-id='type-id-73' name='obj'/>
- <return type-id='type-id-41'/>
- </function-decl>
- <function-decl name='avl_update' mangled-name='avl_update' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update'>
- <parameter type-id='type-id-256' name='t'/>
- <parameter type-id='type-id-73' name='obj'/>
- <return type-id='type-id-41'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='avl_swap' mangled-name='avl_swap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_swap'>
- <parameter type-id='type-id-256' name='tree1'/>
- <parameter type-id='type-id-256' name='tree2'/>
- <return type-id='type-id-17'/>
+ <typedef-decl name='avl_index_t' type-id='type-id-146' id='type-id-151'/>
+ <function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <parameter type-id='type-id-89' name='new_data'/>
+ <parameter type-id='type-id-151' name='where'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_create'>
- <parameter type-id='type-id-256' name='tree'/>
- <parameter type-id='type-id-183' name='compar'/>
- <parameter type-id='type-id-125' name='size'/>
- <parameter type-id='type-id-125' name='offset'/>
- <return type-id='type-id-17'/>
+ <pointer-type-def type-id='type-id-151' size-in-bits='64' id='type-id-152'/>
+ <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_find'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <parameter type-id='type-id-89' name='value'/>
+ <parameter type-id='type-id-152' name='where'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy'>
- <parameter type-id='type-id-256' name='tree'/>
- <return type-id='type-id-17'/>
+ <function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_nearest'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <parameter type-id='type-id-151' name='where'/>
+ <parameter type-id='type-id-1' name='direction'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_numnodes'>
- <parameter type-id='type-id-256' name='tree'/>
- <return type-id='type-id-184'/>
+ <function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_last'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='avl_is_empty' mangled-name='avl_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_is_empty'>
- <parameter type-id='type-id-256' name='tree'/>
- <return type-id='type-id-41'/>
+ <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_first'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy_nodes'>
- <parameter type-id='type-id-256' name='tree'/>
- <parameter type-id='type-id-130' name='cookie'/>
- <return type-id='type-id-73'/>
+ <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_walk'>
+ <parameter type-id='type-id-149' name='tree'/>
+ <parameter type-id='type-id-89' name='oldnode'/>
+ <parameter type-id='type-id-1' name='left'/>
+ <return type-id='type-id-89'/>
</function-decl>
+ <function-type size-in-bits='64' id='type-id-147'>
+ <parameter type-id='type-id-89'/>
+ <parameter type-id='type-id-89'/>
+ <return type-id='type-id-1'/>
+ </function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='thread_pool.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libtpool' language='LANG_C99'>
- <class-decl name='tpool' size-in-bits='2496' is-struct='yes' visibility='default' id='type-id-191'>
+ <abi-instr version='1.0' address-size='64' path='thread_pool.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libtpool' language='LANG_C99'>
+ <class-decl name='tpool' size-in-bits='2496' is-struct='yes' visibility='default' id='type-id-153'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tp_forw' type-id='type-id-259' visibility='default'/>
+ <var-decl name='tp_forw' type-id='type-id-154' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tp_back' type-id='type-id-259' visibility='default'/>
+ <var-decl name='tp_back' type-id='type-id-154' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='tp_mutex' type-id='type-id-260' visibility='default'/>
+ <var-decl name='tp_mutex' type-id='type-id-155' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='tp_busycv' type-id='type-id-261' visibility='default'/>
+ <var-decl name='tp_busycv' type-id='type-id-156' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='tp_workcv' type-id='type-id-261' visibility='default'/>
+ <var-decl name='tp_workcv' type-id='type-id-156' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='tp_waitcv' type-id='type-id-261' visibility='default'/>
+ <var-decl name='tp_waitcv' type-id='type-id-156' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1600'>
- <var-decl name='tp_active' type-id='type-id-262' visibility='default'/>
+ <var-decl name='tp_active' type-id='type-id-157' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1664'>
- <var-decl name='tp_head' type-id='type-id-263' visibility='default'/>
+ <var-decl name='tp_head' type-id='type-id-158' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1728'>
- <var-decl name='tp_tail' type-id='type-id-263' visibility='default'/>
+ <var-decl name='tp_tail' type-id='type-id-158' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1792'>
- <var-decl name='tp_attr' type-id='type-id-150' visibility='default'/>
+ <var-decl name='tp_attr' type-id='type-id-159' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='tp_flags' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2272'>
- <var-decl name='tp_linger' type-id='type-id-34' visibility='default'/>
+ <var-decl name='tp_linger' type-id='type-id-35' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
<var-decl name='tp_njobs' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2336'>
<var-decl name='tp_minimum' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2368'>
<var-decl name='tp_maximum' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2400'>
<var-decl name='tp_current' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
<var-decl name='tp_idle' type-id='type-id-1' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='tpool_t' type-id='type-id-191' id='type-id-264'/>
- <pointer-type-def type-id='type-id-264' size-in-bits='64' id='type-id-259'/>
- <typedef-decl name='pthread_mutex_t' type-id='type-id-2' id='type-id-260'/>
- <union-decl name='__anonymous_union__' size-in-bits='384' is-anonymous='yes' visibility='default' id='type-id-265'>
+ <typedef-decl name='tpool_t' type-id='type-id-153' id='type-id-160'/>
+ <pointer-type-def type-id='type-id-160' size-in-bits='64' id='type-id-154'/>
+ <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='type-id-161'>
<data-member access='private'>
- <var-decl name='__data' type-id='type-id-266' visibility='default'/>
+ <var-decl name='__data' type-id='type-id-162' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__size' type-id='type-id-267' visibility='default'/>
+ <var-decl name='__size' type-id='type-id-163' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__align' type-id='type-id-222' visibility='default'/>
+ <var-decl name='__align' type-id='type-id-86' visibility='default'/>
</data-member>
</union-decl>
- <class-decl name='__pthread_cond_s' size-in-bits='384' is-struct='yes' visibility='default' id='type-id-266'>
- <member-type access='public'>
- <union-decl name='__anonymous_union__' size-in-bits='64' is-anonymous='yes' visibility='default' id='type-id-268'>
- <data-member access='private'>
- <var-decl name='__g1_start' type-id='type-id-269' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__g1_start32' type-id='type-id-270' visibility='default'/>
- </data-member>
- </union-decl>
- </member-type>
+ <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-162'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='' type-id='type-id-271' visibility='default'/>
+ <var-decl name='__lock' type-id='type-id-1' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='__g_refs' type-id='type-id-272' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='__count' type-id='type-id-10' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='__g_size' type-id='type-id-272' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='__owner' type-id='type-id-1' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='__g1_orig_size' type-id='type-id-6' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='__nusers' type-id='type-id-10' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='__wrefs' type-id='type-id-6' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='__kind' type-id='type-id-1' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='__g_signals' type-id='type-id-272' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='__spins' type-id='type-id-164' visibility='default'/>
</data-member>
- </class-decl>
- <union-decl name='__anonymous_union__' size-in-bits='64' is-anonymous='yes' visibility='default' id='type-id-271'>
- <data-member access='private'>
- <var-decl name='__wseq' type-id='type-id-269' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='176'>
+ <var-decl name='__elision' type-id='type-id-164' visibility='default'/>
</data-member>
- <data-member access='private'>
- <var-decl name='__wseq32' type-id='type-id-270' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='__list' type-id='type-id-165' visibility='default'/>
</data-member>
- </union-decl>
- <type-decl name='long long unsigned int' size-in-bits='64' id='type-id-269'/>
- <class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-270'>
+ </class-decl>
+ <type-decl name='short int' size-in-bits='16' id='type-id-164'/>
+ <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-166'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__low' type-id='type-id-6' visibility='default'/>
+ <var-decl name='__prev' type-id='type-id-167' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='__high' type-id='type-id-6' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='__next' type-id='type-id-167' visibility='default'/>
</data-member>
</class-decl>
+ <pointer-type-def type-id='type-id-166' size-in-bits='64' id='type-id-167'/>
+ <typedef-decl name='__pthread_list_t' type-id='type-id-166' id='type-id-165'/>
- <array-type-def dimensions='1' type-id='type-id-6' size-in-bits='64' id='type-id-272'>
- <subrange length='2' type-id='type-id-12' id='type-id-62'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='384' id='type-id-267'>
- <subrange length='48' type-id='type-id-12' id='type-id-273'/>
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='320' id='type-id-163'>
+ <subrange length='40' type-id='type-id-12' id='type-id-168'/>
</array-type-def>
- <typedef-decl name='pthread_cond_t' type-id='type-id-265' id='type-id-261'/>
- <class-decl name='tpool_active' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-274'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tpa_next' type-id='type-id-262' visibility='default'/>
+ <typedef-decl name='pthread_mutex_t' type-id='type-id-161' id='type-id-155'/>
+ <union-decl name='__anonymous_union__' size-in-bits='384' is-anonymous='yes' visibility='default' id='type-id-169'>
+ <data-member access='private'>
+ <var-decl name='__data' type-id='type-id-170' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tpa_tid' type-id='type-id-275' visibility='default'/>
+ <data-member access='private'>
+ <var-decl name='__size' type-id='type-id-171' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='tpool_active_t' type-id='type-id-274' id='type-id-276'/>
- <pointer-type-def type-id='type-id-276' size-in-bits='64' id='type-id-262'/>
- <typedef-decl name='pthread_t' type-id='type-id-26' id='type-id-275'/>
- <class-decl name='tpool_job' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-277'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tpj_next' type-id='type-id-263' visibility='default'/>
+ <data-member access='private'>
+ <var-decl name='__align' type-id='type-id-172' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tpj_func' type-id='type-id-194' visibility='default'/>
+ </union-decl>
+ <class-decl name='__pthread_cond_s' size-in-bits='384' is-struct='yes' visibility='default' id='type-id-170'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='' type-id='type-id-173' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='tpj_arg' type-id='type-id-73' visibility='default'/>
+ <var-decl name='__g_refs' type-id='type-id-174' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='__g_size' type-id='type-id-174' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='__g1_orig_size' type-id='type-id-10' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='__wrefs' type-id='type-id-10' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='__g_signals' type-id='type-id-174' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='tpool_job_t' type-id='type-id-277' id='type-id-278'/>
- <pointer-type-def type-id='type-id-278' size-in-bits='64' id='type-id-263'/>
- <function-decl name='tpool_create' mangled-name='tpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_create'>
- <parameter type-id='type-id-34' name='min_threads'/>
- <parameter type-id='type-id-34' name='max_threads'/>
- <parameter type-id='type-id-34' name='linger'/>
- <parameter type-id='type-id-145' name='attr'/>
- <return type-id='type-id-259'/>
- </function-decl>
- <qualified-type-def type-id='type-id-147' const='yes' id='type-id-279'/>
- <pointer-type-def type-id='type-id-279' size-in-bits='64' id='type-id-280'/>
- <function-decl name='pthread_attr_getstack' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-280'/>
- <parameter type-id='type-id-130'/>
- <parameter type-id='type-id-38'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-265' size-in-bits='64' id='type-id-281'/>
- <function-decl name='pthread_cond_init' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-281'/>
- <parameter type-id='type-id-176'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-147' size-in-bits='64' id='type-id-282'/>
- <function-decl name='pthread_attr_init' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-282'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='1024' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-283'>
+ <union-decl name='__anonymous_union__' size-in-bits='64' is-anonymous='yes' visibility='default' id='type-id-173'>
+ <data-member access='private'>
+ <var-decl name='__wseq' type-id='type-id-175' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='__wseq32' type-id='type-id-176' visibility='default'/>
+ </data-member>
+ </union-decl>
+ <type-decl name='long long unsigned int' size-in-bits='64' id='type-id-175'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-176'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__bits' type-id='type-id-284' visibility='default'/>
+ <var-decl name='__low' type-id='type-id-10' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='__high' type-id='type-id-10' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__cpu_mask' type-id='type-id-26' id='type-id-285'/>
- <array-type-def dimensions='1' type-id='type-id-285' size-in-bits='1024' id='type-id-284'>
- <subrange length='16' type-id='type-id-12' id='type-id-104'/>
+ <array-type-def dimensions='1' type-id='type-id-10' size-in-bits='64' id='type-id-174'>
+ <subrange length='2' type-id='type-id-12' id='type-id-137'/>
</array-type-def>
- <pointer-type-def type-id='type-id-283' size-in-bits='64' id='type-id-286'/>
- <function-decl name='pthread_attr_getaffinity_np' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-280'/>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-286'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-283' const='yes' id='type-id-287'/>
- <pointer-type-def type-id='type-id-287' size-in-bits='64' id='type-id-288'/>
- <function-decl name='pthread_attr_setaffinity_np' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-282'/>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-288'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_getdetachstate' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-280'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_setdetachstate' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-282'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_getguardsize' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-280'/>
- <parameter type-id='type-id-38'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_setguardsize' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-282'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_getinheritsched' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-280'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_setinheritsched' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-282'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='sched_param' size-in-bits='32' is-struct='yes' visibility='default' id='type-id-289'>
+
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='384' id='type-id-171'>
+ <subrange length='48' type-id='type-id-12' id='type-id-177'/>
+
+ </array-type-def>
+ <type-decl name='long long int' size-in-bits='64' id='type-id-172'/>
+ <typedef-decl name='pthread_cond_t' type-id='type-id-169' id='type-id-156'/>
+ <class-decl name='tpool_active' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-178'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='sched_priority' type-id='type-id-1' visibility='default'/>
+ <var-decl name='tpa_next' type-id='type-id-157' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='tpa_tid' type-id='type-id-179' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-289' size-in-bits='64' id='type-id-290'/>
- <function-decl name='pthread_attr_getschedparam' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-280'/>
- <parameter type-id='type-id-290'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-289' const='yes' id='type-id-291'/>
- <pointer-type-def type-id='type-id-291' size-in-bits='64' id='type-id-292'/>
- <function-decl name='pthread_attr_setschedparam' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-282'/>
- <parameter type-id='type-id-292'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_getschedpolicy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-280'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_setschedpolicy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-282'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_getscope' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-280'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_setscope' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-282'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_setstack' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-282'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_attr_destroy' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-282'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='tpool_dispatch' mangled-name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_dispatch'>
- <parameter type-id='type-id-259' name='tpool'/>
- <parameter type-id='type-id-194' name='func'/>
- <parameter type-id='type-id-73' name='arg'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_cond_signal' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-281'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='1024' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-293'>
+ <typedef-decl name='tpool_active_t' type-id='type-id-178' id='type-id-180'/>
+ <pointer-type-def type-id='type-id-180' size-in-bits='64' id='type-id-157'/>
+ <typedef-decl name='pthread_t' type-id='type-id-12' id='type-id-179'/>
+ <class-decl name='tpool_job' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-181'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__val' type-id='type-id-294' visibility='default'/>
+ <var-decl name='tpj_next' type-id='type-id-158' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='tpj_func' type-id='type-id-182' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='tpj_arg' type-id='type-id-89' visibility='default'/>
</data-member>
</class-decl>
+ <typedef-decl name='tpool_job_t' type-id='type-id-181' id='type-id-183'/>
+ <pointer-type-def type-id='type-id-183' size-in-bits='64' id='type-id-158'/>
+ <pointer-type-def type-id='type-id-184' size-in-bits='64' id='type-id-182'/>
+ <union-decl name='pthread_attr_t' size-in-bits='448' visibility='default' id='type-id-185'>
+ <data-member access='private'>
+ <var-decl name='__size' type-id='type-id-186' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='__align' type-id='type-id-86' visibility='default'/>
+ </data-member>
+ </union-decl>
- <array-type-def dimensions='1' type-id='type-id-26' size-in-bits='1024' id='type-id-294'>
- <subrange length='16' type-id='type-id-12' id='type-id-104'/>
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='448' id='type-id-186'>
+ <subrange length='56' type-id='type-id-12' id='type-id-187'/>
</array-type-def>
- <qualified-type-def type-id='type-id-293' const='yes' id='type-id-295'/>
- <pointer-type-def type-id='type-id-293' size-in-bits='64' id='type-id-296'/>
- <function-decl name='pthread_sigmask' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-288'/>
- <parameter type-id='type-id-296'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-297' size-in-bits='64' id='type-id-298'/>
- <function-decl name='pthread_create' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-38'/>
- <parameter type-id='type-id-280'/>
- <parameter type-id='type-id-298'/>
- <parameter type-id='type-id-73'/>
+ <typedef-decl name='pthread_attr_t' type-id='type-id-185' id='type-id-159'/>
+ <function-decl name='tpool_member' mangled-name='tpool_member' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_member'>
+ <parameter type-id='type-id-154' name='tpool'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='tpool_destroy' mangled-name='tpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_destroy'>
- <parameter type-id='type-id-259' name='tpool'/>
- <return type-id='type-id-17'/>
+ <function-decl name='tpool_resume' mangled-name='tpool_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_resume'>
+ <parameter type-id='type-id-154' name='tpool'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='pthread_cond_broadcast' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-281'/>
+ <function-decl name='tpool_suspended' mangled-name='tpool_suspended' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_suspended'>
+ <parameter type-id='type-id-154' name='tpool'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='pthread_cancel' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-1'/>
+ <function-decl name='tpool_suspend' mangled-name='tpool_suspend' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_suspend'>
+ <parameter type-id='type-id-154' name='tpool'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='pthread_cond_wait' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-281'/>
- <parameter type-id='type-id-14'/>
- <return type-id='type-id-1'/>
+ <function-decl name='tpool_wait' mangled-name='tpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_wait'>
+ <parameter type-id='type-id-154' name='tpool'/>
+ <return type-id='type-id-84'/>
</function-decl>
<function-decl name='tpool_abandon' mangled-name='tpool_abandon' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_abandon'>
- <parameter type-id='type-id-259' name='tpool'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='tpool_wait' mangled-name='tpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_wait'>
- <parameter type-id='type-id-259' name='tpool'/>
- <return type-id='type-id-17'/>
+ <parameter type-id='type-id-154' name='tpool'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='tpool_suspend' mangled-name='tpool_suspend' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_suspend'>
- <parameter type-id='type-id-259' name='tpool'/>
- <return type-id='type-id-17'/>
+ <function-decl name='tpool_destroy' mangled-name='tpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_destroy'>
+ <parameter type-id='type-id-154' name='tpool'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='tpool_suspended' mangled-name='tpool_suspended' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_suspended'>
- <parameter type-id='type-id-259' name='tpool'/>
+ <function-decl name='tpool_dispatch' mangled-name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_dispatch'>
+ <parameter type-id='type-id-154' name='tpool'/>
+ <parameter type-id='type-id-182' name='func'/>
+ <parameter type-id='type-id-89' name='arg'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='tpool_resume' mangled-name='tpool_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_resume'>
- <parameter type-id='type-id-259' name='tpool'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='tpool_member' mangled-name='tpool_member' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_member'>
- <parameter type-id='type-id-259' name='tpool'/>
- <return type-id='type-id-1'/>
+ <pointer-type-def type-id='type-id-159' size-in-bits='64' id='type-id-188'/>
+ <function-decl name='tpool_create' mangled-name='tpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_create'>
+ <parameter type-id='type-id-35' name='min_threads'/>
+ <parameter type-id='type-id-35' name='max_threads'/>
+ <parameter type-id='type-id-35' name='linger'/>
+ <parameter type-id='type-id-188' name='attr'/>
+ <return type-id='type-id-154'/>
</function-decl>
- <function-decl name='pthread_self' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-26'/>
+ <function-decl name='pthread_self' mangled-name='pthread_self' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <qualified-type-def type-id='type-id-238' const='yes' id='type-id-299'/>
- <pointer-type-def type-id='type-id-299' size-in-bits='64' id='type-id-300'/>
- <function-decl name='pthread_cond_timedwait' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-281'/>
- <parameter type-id='type-id-14'/>
- <parameter type-id='type-id-300'/>
- <return type-id='type-id-1'/>
+ <function-decl name='pthread_cond_broadcast' mangled-name='pthread_cond_broadcast' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='pthread_setcanceltype' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
+ <function-decl name='__sigsetjmp' mangled-name='__sigsetjmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='pthread_setcancelstate' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
+ <function-decl name='__pthread_register_cancel' mangled-name='__pthread_register_cancel' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-297'>
- <parameter type-id='type-id-73'/>
- <return type-id='type-id-73'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='assert.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
- <var-decl name='libspl_assert_ok' type-id='type-id-1' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='atomic.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
- <qualified-type-def type-id='type-id-32' volatile='yes' id='type-id-301'/>
- <pointer-type-def type-id='type-id-301' size-in-bits='64' id='type-id-302'/>
- <function-decl name='atomic_inc_8' mangled-name='atomic_inc_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8'>
- <parameter type-id='type-id-302' name='target'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <typedef-decl name='uchar_t' type-id='type-id-30' id='type-id-303'/>
- <qualified-type-def type-id='type-id-303' volatile='yes' id='type-id-304'/>
- <pointer-type-def type-id='type-id-304' size-in-bits='64' id='type-id-305'/>
- <function-decl name='atomic_inc_uchar' mangled-name='atomic_inc_uchar' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_uchar'>
- <parameter type-id='type-id-305' name='target'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <qualified-type-def type-id='type-id-224' volatile='yes' id='type-id-306'/>
- <pointer-type-def type-id='type-id-306' size-in-bits='64' id='type-id-307'/>
- <function-decl name='atomic_inc_16' mangled-name='atomic_inc_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16'>
- <parameter type-id='type-id-307' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_cond_wait' mangled-name='pthread_cond_wait' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <qualified-type-def type-id='type-id-227' volatile='yes' id='type-id-308'/>
- <pointer-type-def type-id='type-id-308' size-in-bits='64' id='type-id-309'/>
- <function-decl name='atomic_inc_ushort' mangled-name='atomic_inc_ushort' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ushort'>
- <parameter type-id='type-id-309' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='__pthread_unregister_cancel' mangled-name='__pthread_unregister_cancel' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <qualified-type-def type-id='type-id-22' volatile='yes' id='type-id-310'/>
- <pointer-type-def type-id='type-id-310' size-in-bits='64' id='type-id-311'/>
- <function-decl name='atomic_inc_32' mangled-name='atomic_inc_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32'>
- <parameter type-id='type-id-311' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='__pthread_unwind_next' mangled-name='__pthread_unwind_next' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <qualified-type-def type-id='type-id-34' volatile='yes' id='type-id-312'/>
- <pointer-type-def type-id='type-id-312' size-in-bits='64' id='type-id-313'/>
- <function-decl name='atomic_inc_uint' mangled-name='atomic_inc_uint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_uint'>
- <parameter type-id='type-id-313' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_cancel' mangled-name='pthread_cancel' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <qualified-type-def type-id='type-id-184' volatile='yes' id='type-id-314'/>
- <pointer-type-def type-id='type-id-314' size-in-bits='64' id='type-id-315'/>
- <function-decl name='atomic_inc_ulong' mangled-name='atomic_inc_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong'>
- <parameter type-id='type-id-315' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_cond_signal' mangled-name='pthread_cond_signal' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <qualified-type-def type-id='type-id-23' volatile='yes' id='type-id-316'/>
- <pointer-type-def type-id='type-id-316' size-in-bits='64' id='type-id-317'/>
- <function-decl name='atomic_inc_64' mangled-name='atomic_inc_64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_64'>
- <parameter type-id='type-id-317' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_init' mangled-name='pthread_attr_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_dec_8' mangled-name='atomic_dec_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8'>
- <parameter type-id='type-id-302' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_getaffinity_np' mangled-name='pthread_attr_getaffinity_np' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_dec_uchar' mangled-name='atomic_dec_uchar' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_uchar'>
- <parameter type-id='type-id-305' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_destroy' mangled-name='pthread_attr_destroy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_dec_16' mangled-name='atomic_dec_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16'>
- <parameter type-id='type-id-307' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_setaffinity_np' mangled-name='pthread_attr_setaffinity_np' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_dec_ushort' mangled-name='atomic_dec_ushort' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ushort'>
- <parameter type-id='type-id-309' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_getdetachstate' mangled-name='pthread_attr_getdetachstate' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_dec_32' mangled-name='atomic_dec_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32'>
- <parameter type-id='type-id-311' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_setdetachstate' mangled-name='pthread_attr_setdetachstate' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_dec_uint' mangled-name='atomic_dec_uint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_uint'>
- <parameter type-id='type-id-313' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_getguardsize' mangled-name='pthread_attr_getguardsize' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_dec_ulong' mangled-name='atomic_dec_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong'>
- <parameter type-id='type-id-315' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_setguardsize' mangled-name='pthread_attr_setguardsize' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_dec_64' mangled-name='atomic_dec_64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_64'>
- <parameter type-id='type-id-317' name='target'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_getinheritsched' mangled-name='pthread_attr_getinheritsched' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <type-decl name='signed char' size-in-bits='8' id='type-id-318'/>
- <typedef-decl name='__int8_t' type-id='type-id-318' id='type-id-319'/>
- <typedef-decl name='int8_t' type-id='type-id-319' id='type-id-320'/>
- <function-decl name='atomic_add_8' mangled-name='atomic_add_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8'>
- <parameter type-id='type-id-302' name='target'/>
- <parameter type-id='type-id-320' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_setinheritsched' mangled-name='pthread_attr_setinheritsched' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_add_char' mangled-name='atomic_add_char' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_char'>
- <parameter type-id='type-id-305' name='target'/>
- <parameter type-id='type-id-318' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_getschedparam' mangled-name='pthread_attr_getschedparam' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_add_16' mangled-name='atomic_add_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16'>
- <parameter type-id='type-id-307' name='target'/>
- <parameter type-id='type-id-66' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_setschedparam' mangled-name='pthread_attr_setschedparam' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_add_short' mangled-name='atomic_add_short' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_short'>
- <parameter type-id='type-id-309' name='target'/>
- <parameter type-id='type-id-7' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_getschedpolicy' mangled-name='pthread_attr_getschedpolicy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_add_32' mangled-name='atomic_add_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32'>
- <parameter type-id='type-id-311' name='target'/>
- <parameter type-id='type-id-21' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_setschedpolicy' mangled-name='pthread_attr_setschedpolicy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_add_int' mangled-name='atomic_add_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_int'>
- <parameter type-id='type-id-313' name='target'/>
- <parameter type-id='type-id-1' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_getscope' mangled-name='pthread_attr_getscope' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_add_long' mangled-name='atomic_add_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-5' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_setscope' mangled-name='pthread_attr_setscope' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <typedef-decl name='__int64_t' type-id='type-id-5' id='type-id-321'/>
- <typedef-decl name='int64_t' type-id='type-id-321' id='type-id-322'/>
- <function-decl name='atomic_add_64' mangled-name='atomic_add_64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_64'>
- <parameter type-id='type-id-317' name='target'/>
- <parameter type-id='type-id-322' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_getstack' mangled-name='pthread_attr_getstack' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_add_ptr' mangled-name='atomic_add_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr'>
- <parameter type-id='type-id-132' name='target'/>
- <parameter type-id='type-id-124' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_attr_setstack' mangled-name='pthread_attr_setstack' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_8' mangled-name='atomic_sub_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8'>
- <parameter type-id='type-id-302' name='target'/>
- <parameter type-id='type-id-320' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_cond_init' mangled-name='pthread_cond_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_char' mangled-name='atomic_sub_char' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_char'>
- <parameter type-id='type-id-305' name='target'/>
- <parameter type-id='type-id-318' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_sigmask' mangled-name='pthread_sigmask' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_16' mangled-name='atomic_sub_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16'>
- <parameter type-id='type-id-307' name='target'/>
- <parameter type-id='type-id-66' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_create' mangled-name='pthread_create' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_short' mangled-name='atomic_sub_short' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_short'>
- <parameter type-id='type-id-309' name='target'/>
- <parameter type-id='type-id-7' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_cond_timedwait' mangled-name='pthread_cond_timedwait' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_32' mangled-name='atomic_sub_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32'>
- <parameter type-id='type-id-311' name='target'/>
- <parameter type-id='type-id-21' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_setcanceltype' mangled-name='pthread_setcanceltype' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_int' mangled-name='atomic_sub_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_int'>
- <parameter type-id='type-id-313' name='target'/>
- <parameter type-id='type-id-1' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='pthread_setcancelstate' mangled-name='pthread_setcancelstate' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_long' mangled-name='atomic_sub_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-5' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-type size-in-bits='64' id='type-id-184'>
+ <parameter type-id='type-id-89'/>
+ <return type-id='type-id-84'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='assert.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <var-decl name='libspl_assert_ok' type-id='type-id-1' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
+ <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
+ <parameter type-id='type-id-4' name='file'/>
+ <parameter type-id='type-id-4' name='func'/>
+ <parameter type-id='type-id-1' name='line'/>
+ <parameter type-id='type-id-4' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_64' mangled-name='atomic_sub_64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_64'>
- <parameter type-id='type-id-317' name='target'/>
- <parameter type-id='type-id-322' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='__vfprintf_chk' mangled-name='__vfprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_ptr' mangled-name='atomic_sub_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr'>
- <parameter type-id='type-id-132' name='target'/>
- <parameter type-id='type-id-124' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='__builtin_fputc' mangled-name='fputc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_8' mangled-name='atomic_or_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8'>
- <parameter type-id='type-id-302' name='target'/>
- <parameter type-id='type-id-32' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='abort' mangled-name='abort' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_uchar' mangled-name='atomic_or_uchar' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_uchar'>
- <parameter type-id='type-id-305' name='target'/>
- <parameter type-id='type-id-303' name='bits'/>
- <return type-id='type-id-17'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='atomic.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='membar_consumer' mangled-name='membar_consumer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_consumer'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_16' mangled-name='atomic_or_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16'>
- <parameter type-id='type-id-307' name='target'/>
- <parameter type-id='type-id-224' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_producer'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_ushort' mangled-name='atomic_or_ushort' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ushort'>
- <parameter type-id='type-id-309' name='target'/>
- <parameter type-id='type-id-227' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='membar_enter' mangled-name='membar_enter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_enter'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_32' mangled-name='atomic_or_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32'>
- <parameter type-id='type-id-311' name='target'/>
- <parameter type-id='type-id-22' name='bits'/>
- <return type-id='type-id-17'/>
+ <qualified-type-def type-id='type-id-143' volatile='yes' id='type-id-189'/>
+ <pointer-type-def type-id='type-id-189' size-in-bits='64' id='type-id-190'/>
+ <function-decl name='atomic_clear_long_excl' mangled-name='atomic_clear_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_clear_long_excl'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-35' name='value'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='atomic_or_uint' mangled-name='atomic_or_uint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_uint'>
- <parameter type-id='type-id-313' name='target'/>
- <parameter type-id='type-id-34' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='atomic_set_long_excl' mangled-name='atomic_set_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_set_long_excl'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-35' name='value'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='atomic_or_ulong' mangled-name='atomic_or_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-184' name='bits'/>
- <return type-id='type-id-17'/>
+ <qualified-type-def type-id='type-id-84' volatile='yes' id='type-id-191'/>
+ <pointer-type-def type-id='type-id-191' size-in-bits='64' id='type-id-192'/>
+ <function-decl name='atomic_swap_ptr' mangled-name='atomic_swap_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ptr'>
+ <parameter type-id='type-id-192' name='target'/>
+ <parameter type-id='type-id-89' name='bits'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='atomic_or_64' mangled-name='atomic_or_64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_64'>
- <parameter type-id='type-id-317' name='target'/>
- <parameter type-id='type-id-23' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='atomic_swap_ulong' mangled-name='atomic_swap_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ulong'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-143' name='bits'/>
+ <return type-id='type-id-143'/>
</function-decl>
- <function-decl name='atomic_and_8' mangled-name='atomic_and_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8'>
- <parameter type-id='type-id-302' name='target'/>
- <parameter type-id='type-id-32' name='bits'/>
- <return type-id='type-id-17'/>
+ <qualified-type-def type-id='type-id-7' volatile='yes' id='type-id-193'/>
+ <pointer-type-def type-id='type-id-193' size-in-bits='64' id='type-id-194'/>
+ <function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_32'>
+ <parameter type-id='type-id-194' name='target'/>
+ <parameter type-id='type-id-7' name='bits'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='atomic_and_uchar' mangled-name='atomic_and_uchar' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_uchar'>
- <parameter type-id='type-id-305' name='target'/>
- <parameter type-id='type-id-303' name='bits'/>
- <return type-id='type-id-17'/>
+ <type-decl name='unsigned short int' size-in-bits='16' id='type-id-195'/>
+ <typedef-decl name='__uint16_t' type-id='type-id-195' id='type-id-196'/>
+ <typedef-decl name='uint16_t' type-id='type-id-196' id='type-id-197'/>
+ <qualified-type-def type-id='type-id-197' volatile='yes' id='type-id-198'/>
+ <pointer-type-def type-id='type-id-198' size-in-bits='64' id='type-id-199'/>
+ <function-decl name='atomic_swap_16' mangled-name='atomic_swap_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_16'>
+ <parameter type-id='type-id-199' name='target'/>
+ <parameter type-id='type-id-197' name='bits'/>
+ <return type-id='type-id-197'/>
</function-decl>
- <function-decl name='atomic_and_16' mangled-name='atomic_and_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16'>
- <parameter type-id='type-id-307' name='target'/>
- <parameter type-id='type-id-224' name='bits'/>
- <return type-id='type-id-17'/>
+ <qualified-type-def type-id='type-id-33' volatile='yes' id='type-id-200'/>
+ <pointer-type-def type-id='type-id-200' size-in-bits='64' id='type-id-201'/>
+ <function-decl name='atomic_swap_8' mangled-name='atomic_swap_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_8'>
+ <parameter type-id='type-id-201' name='target'/>
+ <parameter type-id='type-id-33' name='bits'/>
+ <return type-id='type-id-33'/>
</function-decl>
- <function-decl name='atomic_and_ushort' mangled-name='atomic_and_ushort' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ushort'>
- <parameter type-id='type-id-309' name='target'/>
- <parameter type-id='type-id-227' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='atomic_cas_ptr' mangled-name='atomic_cas_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ptr'>
+ <parameter type-id='type-id-192' name='target'/>
+ <parameter type-id='type-id-89' name='exp'/>
+ <parameter type-id='type-id-89' name='des'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='atomic_and_32' mangled-name='atomic_and_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32'>
- <parameter type-id='type-id-311' name='target'/>
- <parameter type-id='type-id-22' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='atomic_and_ulong_nv' mangled-name='atomic_and_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong_nv'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-143' name='bits'/>
+ <return type-id='type-id-143'/>
</function-decl>
- <function-decl name='atomic_and_uint' mangled-name='atomic_and_uint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_uint'>
- <parameter type-id='type-id-313' name='target'/>
- <parameter type-id='type-id-34' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='atomic_and_32_nv' mangled-name='atomic_and_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32_nv'>
+ <parameter type-id='type-id-194' name='target'/>
+ <parameter type-id='type-id-7' name='bits'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='atomic_and_ulong' mangled-name='atomic_and_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-184' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='atomic_and_16_nv' mangled-name='atomic_and_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16_nv'>
+ <parameter type-id='type-id-199' name='target'/>
+ <parameter type-id='type-id-197' name='bits'/>
+ <return type-id='type-id-197'/>
</function-decl>
- <function-decl name='atomic_and_64' mangled-name='atomic_and_64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_64'>
- <parameter type-id='type-id-317' name='target'/>
- <parameter type-id='type-id-23' name='bits'/>
- <return type-id='type-id-17'/>
+ <function-decl name='atomic_and_8_nv' mangled-name='atomic_and_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8_nv'>
+ <parameter type-id='type-id-201' name='target'/>
+ <parameter type-id='type-id-33' name='bits'/>
+ <return type-id='type-id-33'/>
</function-decl>
- <function-decl name='atomic_inc_8_nv' mangled-name='atomic_inc_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8_nv'>
- <parameter type-id='type-id-302' name='target'/>
- <return type-id='type-id-32'/>
+ <function-decl name='atomic_or_ulong_nv' mangled-name='atomic_or_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong_nv'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-143' name='bits'/>
+ <return type-id='type-id-143'/>
</function-decl>
- <function-decl name='atomic_inc_uchar_nv' mangled-name='atomic_inc_uchar_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_uchar_nv'>
- <parameter type-id='type-id-305' name='target'/>
- <return type-id='type-id-303'/>
+ <function-decl name='atomic_or_32_nv' mangled-name='atomic_or_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32_nv'>
+ <parameter type-id='type-id-194' name='target'/>
+ <parameter type-id='type-id-7' name='bits'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='atomic_inc_16_nv' mangled-name='atomic_inc_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16_nv'>
- <parameter type-id='type-id-307' name='target'/>
- <return type-id='type-id-224'/>
+ <function-decl name='atomic_or_16_nv' mangled-name='atomic_or_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16_nv'>
+ <parameter type-id='type-id-199' name='target'/>
+ <parameter type-id='type-id-197' name='bits'/>
+ <return type-id='type-id-197'/>
</function-decl>
- <function-decl name='atomic_inc_ushort_nv' mangled-name='atomic_inc_ushort_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ushort_nv'>
- <parameter type-id='type-id-309' name='target'/>
- <return type-id='type-id-227'/>
+ <function-decl name='atomic_or_8_nv' mangled-name='atomic_or_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8_nv'>
+ <parameter type-id='type-id-201' name='target'/>
+ <parameter type-id='type-id-33' name='bits'/>
+ <return type-id='type-id-33'/>
</function-decl>
- <function-decl name='atomic_inc_32_nv' mangled-name='atomic_inc_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32_nv'>
- <parameter type-id='type-id-311' name='target'/>
- <return type-id='type-id-22'/>
+ <function-decl name='atomic_sub_ptr_nv' mangled-name='atomic_sub_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr_nv'>
+ <parameter type-id='type-id-192' name='target'/>
+ <parameter type-id='type-id-88' name='bits'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='atomic_inc_uint_nv' mangled-name='atomic_inc_uint_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_uint_nv'>
- <parameter type-id='type-id-313' name='target'/>
- <return type-id='type-id-34'/>
+ <function-decl name='atomic_sub_long_nv' mangled-name='atomic_sub_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long_nv'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-86' name='bits'/>
+ <return type-id='type-id-143'/>
</function-decl>
- <function-decl name='atomic_inc_ulong_nv' mangled-name='atomic_inc_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong_nv'>
- <parameter type-id='type-id-315' name='target'/>
- <return type-id='type-id-184'/>
+ <function-decl name='atomic_sub_32_nv' mangled-name='atomic_sub_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32_nv'>
+ <parameter type-id='type-id-194' name='target'/>
+ <parameter type-id='type-id-6' name='bits'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='atomic_inc_64_nv' mangled-name='atomic_inc_64_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_64_nv'>
- <parameter type-id='type-id-317' name='target'/>
- <return type-id='type-id-23'/>
+ <typedef-decl name='__int16_t' type-id='type-id-164' id='type-id-202'/>
+ <typedef-decl name='int16_t' type-id='type-id-202' id='type-id-203'/>
+ <function-decl name='atomic_sub_16_nv' mangled-name='atomic_sub_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16_nv'>
+ <parameter type-id='type-id-199' name='target'/>
+ <parameter type-id='type-id-203' name='bits'/>
+ <return type-id='type-id-197'/>
</function-decl>
- <function-decl name='atomic_dec_8_nv' mangled-name='atomic_dec_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8_nv'>
- <parameter type-id='type-id-302' name='target'/>
- <return type-id='type-id-32'/>
+ <type-decl name='signed char' size-in-bits='8' id='type-id-204'/>
+ <typedef-decl name='__int8_t' type-id='type-id-204' id='type-id-205'/>
+ <typedef-decl name='int8_t' type-id='type-id-205' id='type-id-206'/>
+ <function-decl name='atomic_sub_8_nv' mangled-name='atomic_sub_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8_nv'>
+ <parameter type-id='type-id-201' name='target'/>
+ <parameter type-id='type-id-206' name='bits'/>
+ <return type-id='type-id-33'/>
</function-decl>
- <function-decl name='atomic_dec_uchar_nv' mangled-name='atomic_dec_uchar_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_uchar_nv'>
- <parameter type-id='type-id-305' name='target'/>
- <return type-id='type-id-303'/>
+ <function-decl name='atomic_add_ptr_nv' mangled-name='atomic_add_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr_nv'>
+ <parameter type-id='type-id-192' name='target'/>
+ <parameter type-id='type-id-88' name='bits'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='atomic_dec_16_nv' mangled-name='atomic_dec_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16_nv'>
- <parameter type-id='type-id-307' name='target'/>
- <return type-id='type-id-224'/>
+ <function-decl name='atomic_add_long_nv' mangled-name='atomic_add_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long_nv'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-86' name='bits'/>
+ <return type-id='type-id-143'/>
</function-decl>
- <function-decl name='atomic_dec_ushort_nv' mangled-name='atomic_dec_ushort_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ushort_nv'>
- <parameter type-id='type-id-309' name='target'/>
- <return type-id='type-id-227'/>
+ <function-decl name='atomic_add_32_nv' mangled-name='atomic_add_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32_nv'>
+ <parameter type-id='type-id-194' name='target'/>
+ <parameter type-id='type-id-6' name='bits'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='atomic_dec_32_nv' mangled-name='atomic_dec_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32_nv'>
- <parameter type-id='type-id-311' name='target'/>
- <return type-id='type-id-22'/>
+ <function-decl name='atomic_add_16_nv' mangled-name='atomic_add_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16_nv'>
+ <parameter type-id='type-id-199' name='target'/>
+ <parameter type-id='type-id-203' name='bits'/>
+ <return type-id='type-id-197'/>
</function-decl>
- <function-decl name='atomic_dec_uint_nv' mangled-name='atomic_dec_uint_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_uint_nv'>
- <parameter type-id='type-id-313' name='target'/>
- <return type-id='type-id-34'/>
+ <function-decl name='atomic_add_8_nv' mangled-name='atomic_add_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8_nv'>
+ <parameter type-id='type-id-201' name='target'/>
+ <parameter type-id='type-id-206' name='bits'/>
+ <return type-id='type-id-33'/>
</function-decl>
<function-decl name='atomic_dec_ulong_nv' mangled-name='atomic_dec_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong_nv'>
- <parameter type-id='type-id-315' name='target'/>
- <return type-id='type-id-184'/>
+ <parameter type-id='type-id-190' name='target'/>
+ <return type-id='type-id-143'/>
</function-decl>
- <function-decl name='atomic_dec_64_nv' mangled-name='atomic_dec_64_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_64_nv'>
- <parameter type-id='type-id-317' name='target'/>
- <return type-id='type-id-23'/>
- </function-decl>
- <function-decl name='atomic_add_8_nv' mangled-name='atomic_add_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8_nv'>
- <parameter type-id='type-id-302' name='target'/>
- <parameter type-id='type-id-320' name='bits'/>
- <return type-id='type-id-32'/>
+ <function-decl name='atomic_dec_32_nv' mangled-name='atomic_dec_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32_nv'>
+ <parameter type-id='type-id-194' name='target'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='atomic_add_char_nv' mangled-name='atomic_add_char_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_char_nv'>
- <parameter type-id='type-id-305' name='target'/>
- <parameter type-id='type-id-318' name='bits'/>
- <return type-id='type-id-303'/>
+ <function-decl name='atomic_dec_16_nv' mangled-name='atomic_dec_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16_nv'>
+ <parameter type-id='type-id-199' name='target'/>
+ <return type-id='type-id-197'/>
</function-decl>
- <function-decl name='atomic_add_16_nv' mangled-name='atomic_add_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16_nv'>
- <parameter type-id='type-id-307' name='target'/>
- <parameter type-id='type-id-66' name='bits'/>
- <return type-id='type-id-224'/>
+ <function-decl name='atomic_dec_8_nv' mangled-name='atomic_dec_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8_nv'>
+ <parameter type-id='type-id-201' name='target'/>
+ <return type-id='type-id-33'/>
</function-decl>
- <function-decl name='atomic_add_short_nv' mangled-name='atomic_add_short_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_short_nv'>
- <parameter type-id='type-id-309' name='target'/>
- <parameter type-id='type-id-7' name='bits'/>
- <return type-id='type-id-227'/>
+ <function-decl name='atomic_inc_ulong_nv' mangled-name='atomic_inc_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong_nv'>
+ <parameter type-id='type-id-190' name='target'/>
+ <return type-id='type-id-143'/>
</function-decl>
- <function-decl name='atomic_add_32_nv' mangled-name='atomic_add_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32_nv'>
- <parameter type-id='type-id-311' name='target'/>
- <parameter type-id='type-id-21' name='bits'/>
- <return type-id='type-id-22'/>
+ <function-decl name='atomic_inc_32_nv' mangled-name='atomic_inc_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32_nv'>
+ <parameter type-id='type-id-194' name='target'/>
+ <return type-id='type-id-7'/>
</function-decl>
- <function-decl name='atomic_add_int_nv' mangled-name='atomic_add_int_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_int_nv'>
- <parameter type-id='type-id-313' name='target'/>
- <parameter type-id='type-id-1' name='bits'/>
- <return type-id='type-id-34'/>
+ <function-decl name='atomic_inc_16_nv' mangled-name='atomic_inc_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16_nv'>
+ <parameter type-id='type-id-199' name='target'/>
+ <return type-id='type-id-197'/>
</function-decl>
- <function-decl name='atomic_add_long_nv' mangled-name='atomic_add_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long_nv'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-5' name='bits'/>
- <return type-id='type-id-184'/>
+ <function-decl name='atomic_inc_8_nv' mangled-name='atomic_inc_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8_nv'>
+ <parameter type-id='type-id-201' name='target'/>
+ <return type-id='type-id-33'/>
</function-decl>
- <function-decl name='atomic_add_64_nv' mangled-name='atomic_add_64_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_64_nv'>
- <parameter type-id='type-id-317' name='target'/>
- <parameter type-id='type-id-322' name='bits'/>
- <return type-id='type-id-23'/>
+ <function-decl name='atomic_and_ulong' mangled-name='atomic_and_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-143' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_add_ptr_nv' mangled-name='atomic_add_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr_nv'>
- <parameter type-id='type-id-132' name='target'/>
- <parameter type-id='type-id-124' name='bits'/>
- <return type-id='type-id-73'/>
+ <function-decl name='atomic_and_32' mangled-name='atomic_and_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32'>
+ <parameter type-id='type-id-194' name='target'/>
+ <parameter type-id='type-id-7' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_8_nv' mangled-name='atomic_sub_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8_nv'>
- <parameter type-id='type-id-302' name='target'/>
- <parameter type-id='type-id-320' name='bits'/>
- <return type-id='type-id-32'/>
+ <function-decl name='atomic_and_16' mangled-name='atomic_and_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16'>
+ <parameter type-id='type-id-199' name='target'/>
+ <parameter type-id='type-id-197' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_char_nv' mangled-name='atomic_sub_char_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_char_nv'>
- <parameter type-id='type-id-305' name='target'/>
- <parameter type-id='type-id-318' name='bits'/>
- <return type-id='type-id-303'/>
+ <function-decl name='atomic_and_8' mangled-name='atomic_and_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8'>
+ <parameter type-id='type-id-201' name='target'/>
+ <parameter type-id='type-id-33' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_16_nv' mangled-name='atomic_sub_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16_nv'>
- <parameter type-id='type-id-307' name='target'/>
- <parameter type-id='type-id-66' name='bits'/>
- <return type-id='type-id-224'/>
+ <function-decl name='atomic_or_ulong' mangled-name='atomic_or_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-143' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_short_nv' mangled-name='atomic_sub_short_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_short_nv'>
- <parameter type-id='type-id-309' name='target'/>
+ <function-decl name='atomic_or_32' mangled-name='atomic_or_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32'>
+ <parameter type-id='type-id-194' name='target'/>
<parameter type-id='type-id-7' name='bits'/>
- <return type-id='type-id-227'/>
- </function-decl>
- <function-decl name='atomic_sub_32_nv' mangled-name='atomic_sub_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32_nv'>
- <parameter type-id='type-id-311' name='target'/>
- <parameter type-id='type-id-21' name='bits'/>
- <return type-id='type-id-22'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_int_nv' mangled-name='atomic_sub_int_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_int_nv'>
- <parameter type-id='type-id-313' name='target'/>
- <parameter type-id='type-id-1' name='bits'/>
- <return type-id='type-id-34'/>
+ <function-decl name='atomic_or_16' mangled-name='atomic_or_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16'>
+ <parameter type-id='type-id-199' name='target'/>
+ <parameter type-id='type-id-197' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_long_nv' mangled-name='atomic_sub_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long_nv'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-5' name='bits'/>
- <return type-id='type-id-184'/>
+ <function-decl name='atomic_or_8' mangled-name='atomic_or_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8'>
+ <parameter type-id='type-id-201' name='target'/>
+ <parameter type-id='type-id-33' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_64_nv' mangled-name='atomic_sub_64_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_64_nv'>
- <parameter type-id='type-id-317' name='target'/>
- <parameter type-id='type-id-322' name='bits'/>
- <return type-id='type-id-23'/>
+ <function-decl name='atomic_sub_ptr' mangled-name='atomic_sub_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr'>
+ <parameter type-id='type-id-192' name='target'/>
+ <parameter type-id='type-id-88' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_sub_ptr_nv' mangled-name='atomic_sub_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr_nv'>
- <parameter type-id='type-id-132' name='target'/>
- <parameter type-id='type-id-124' name='bits'/>
- <return type-id='type-id-73'/>
+ <function-decl name='atomic_sub_long' mangled-name='atomic_sub_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-86' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_8_nv' mangled-name='atomic_or_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8_nv'>
- <parameter type-id='type-id-302' name='target'/>
- <parameter type-id='type-id-32' name='bits'/>
- <return type-id='type-id-32'/>
+ <function-decl name='atomic_sub_32' mangled-name='atomic_sub_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32'>
+ <parameter type-id='type-id-194' name='target'/>
+ <parameter type-id='type-id-6' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_uchar_nv' mangled-name='atomic_or_uchar_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_uchar_nv'>
- <parameter type-id='type-id-305' name='target'/>
- <parameter type-id='type-id-303' name='bits'/>
- <return type-id='type-id-303'/>
+ <function-decl name='atomic_sub_16' mangled-name='atomic_sub_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16'>
+ <parameter type-id='type-id-199' name='target'/>
+ <parameter type-id='type-id-203' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_16_nv' mangled-name='atomic_or_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16_nv'>
- <parameter type-id='type-id-307' name='target'/>
- <parameter type-id='type-id-224' name='bits'/>
- <return type-id='type-id-224'/>
+ <function-decl name='atomic_sub_8' mangled-name='atomic_sub_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8'>
+ <parameter type-id='type-id-201' name='target'/>
+ <parameter type-id='type-id-206' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_ushort_nv' mangled-name='atomic_or_ushort_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ushort_nv'>
- <parameter type-id='type-id-309' name='target'/>
- <parameter type-id='type-id-227' name='bits'/>
- <return type-id='type-id-227'/>
+ <function-decl name='atomic_add_ptr' mangled-name='atomic_add_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr'>
+ <parameter type-id='type-id-192' name='target'/>
+ <parameter type-id='type-id-88' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_32_nv' mangled-name='atomic_or_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32_nv'>
- <parameter type-id='type-id-311' name='target'/>
- <parameter type-id='type-id-22' name='bits'/>
- <return type-id='type-id-22'/>
+ <function-decl name='atomic_add_long' mangled-name='atomic_add_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long'>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-86' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_uint_nv' mangled-name='atomic_or_uint_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_uint_nv'>
- <parameter type-id='type-id-313' name='target'/>
- <parameter type-id='type-id-34' name='bits'/>
- <return type-id='type-id-34'/>
+ <function-decl name='atomic_add_32' mangled-name='atomic_add_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32'>
+ <parameter type-id='type-id-194' name='target'/>
+ <parameter type-id='type-id-6' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_ulong_nv' mangled-name='atomic_or_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong_nv'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-184' name='bits'/>
- <return type-id='type-id-184'/>
+ <function-decl name='atomic_add_16' mangled-name='atomic_add_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16'>
+ <parameter type-id='type-id-199' name='target'/>
+ <parameter type-id='type-id-203' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_or_64_nv' mangled-name='atomic_or_64_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_64_nv'>
- <parameter type-id='type-id-317' name='target'/>
- <parameter type-id='type-id-23' name='bits'/>
- <return type-id='type-id-23'/>
+ <function-decl name='atomic_add_8' mangled-name='atomic_add_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8'>
+ <parameter type-id='type-id-201' name='target'/>
+ <parameter type-id='type-id-206' name='bits'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_and_8_nv' mangled-name='atomic_and_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8_nv'>
- <parameter type-id='type-id-302' name='target'/>
- <parameter type-id='type-id-32' name='bits'/>
- <return type-id='type-id-32'/>
+ <function-decl name='atomic_dec_ulong' mangled-name='atomic_dec_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong'>
+ <parameter type-id='type-id-190' name='target'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_and_uchar_nv' mangled-name='atomic_and_uchar_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_uchar_nv'>
- <parameter type-id='type-id-305' name='target'/>
- <parameter type-id='type-id-303' name='bits'/>
- <return type-id='type-id-303'/>
+ <function-decl name='atomic_dec_32' mangled-name='atomic_dec_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32'>
+ <parameter type-id='type-id-194' name='target'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_and_16_nv' mangled-name='atomic_and_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16_nv'>
- <parameter type-id='type-id-307' name='target'/>
- <parameter type-id='type-id-224' name='bits'/>
- <return type-id='type-id-224'/>
+ <function-decl name='atomic_dec_16' mangled-name='atomic_dec_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16'>
+ <parameter type-id='type-id-199' name='target'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_and_ushort_nv' mangled-name='atomic_and_ushort_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ushort_nv'>
- <parameter type-id='type-id-309' name='target'/>
- <parameter type-id='type-id-227' name='bits'/>
- <return type-id='type-id-227'/>
+ <function-decl name='atomic_dec_8' mangled-name='atomic_dec_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8'>
+ <parameter type-id='type-id-201' name='target'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_and_32_nv' mangled-name='atomic_and_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32_nv'>
- <parameter type-id='type-id-311' name='target'/>
- <parameter type-id='type-id-22' name='bits'/>
- <return type-id='type-id-22'/>
+ <function-decl name='atomic_inc_ulong' mangled-name='atomic_inc_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong'>
+ <parameter type-id='type-id-190' name='target'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_and_uint_nv' mangled-name='atomic_and_uint_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_uint_nv'>
- <parameter type-id='type-id-313' name='target'/>
- <parameter type-id='type-id-34' name='bits'/>
- <return type-id='type-id-34'/>
+ <function-decl name='atomic_inc_32' mangled-name='atomic_inc_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32'>
+ <parameter type-id='type-id-194' name='target'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_and_ulong_nv' mangled-name='atomic_and_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong_nv'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-184' name='bits'/>
- <return type-id='type-id-184'/>
+ <function-decl name='atomic_inc_16' mangled-name='atomic_inc_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16'>
+ <parameter type-id='type-id-199' name='target'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_and_64_nv' mangled-name='atomic_and_64_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_64_nv'>
- <parameter type-id='type-id-317' name='target'/>
- <parameter type-id='type-id-23' name='bits'/>
- <return type-id='type-id-23'/>
+ <function-decl name='atomic_inc_8' mangled-name='atomic_inc_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8'>
+ <parameter type-id='type-id-201' name='target'/>
+ <return type-id='type-id-84'/>
</function-decl>
<function-decl name='atomic_cas_8' mangled-name='atomic_cas_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_8'>
- <parameter type-id='type-id-302' name='target'/>
- <parameter type-id='type-id-32' name='exp'/>
- <parameter type-id='type-id-32' name='des'/>
- <return type-id='type-id-32'/>
- </function-decl>
- <function-decl name='atomic_cas_uchar' mangled-name='atomic_cas_uchar' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_uchar'>
- <parameter type-id='type-id-305' name='target'/>
- <parameter type-id='type-id-303' name='exp'/>
- <parameter type-id='type-id-303' name='des'/>
- <return type-id='type-id-303'/>
+ <parameter type-id='type-id-201' name='target'/>
+ <parameter type-id='type-id-33' name='exp'/>
+ <parameter type-id='type-id-33' name='des'/>
+ <return type-id='type-id-33'/>
</function-decl>
<function-decl name='atomic_cas_16' mangled-name='atomic_cas_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_16'>
- <parameter type-id='type-id-307' name='target'/>
- <parameter type-id='type-id-224' name='exp'/>
- <parameter type-id='type-id-224' name='des'/>
- <return type-id='type-id-224'/>
- </function-decl>
- <function-decl name='atomic_cas_ushort' mangled-name='atomic_cas_ushort' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ushort'>
- <parameter type-id='type-id-309' name='target'/>
- <parameter type-id='type-id-227' name='exp'/>
- <parameter type-id='type-id-227' name='des'/>
- <return type-id='type-id-227'/>
+ <parameter type-id='type-id-199' name='target'/>
+ <parameter type-id='type-id-197' name='exp'/>
+ <parameter type-id='type-id-197' name='des'/>
+ <return type-id='type-id-197'/>
</function-decl>
<function-decl name='atomic_cas_32' mangled-name='atomic_cas_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_32'>
- <parameter type-id='type-id-311' name='target'/>
- <parameter type-id='type-id-22' name='exp'/>
- <parameter type-id='type-id-22' name='des'/>
- <return type-id='type-id-22'/>
- </function-decl>
- <function-decl name='atomic_cas_uint' mangled-name='atomic_cas_uint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_uint'>
- <parameter type-id='type-id-313' name='target'/>
- <parameter type-id='type-id-34' name='exp'/>
- <parameter type-id='type-id-34' name='des'/>
- <return type-id='type-id-34'/>
+ <parameter type-id='type-id-194' name='target'/>
+ <parameter type-id='type-id-7' name='exp'/>
+ <parameter type-id='type-id-7' name='des'/>
+ <return type-id='type-id-7'/>
</function-decl>
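(For orientation while reading this ABI dump: the function-decl entries above record libspl's Solaris-compatible atomic_*() family, e.g. atomic_add_32_nv(), atomic_inc_32() and atomic_cas_32(), whose C prototypes come from the libspl atomic.h header. The following is a minimal, hedged usage sketch, assuming that header is on the include path and the program links against libspl; it is illustrative only and not part of the recorded ABI.)

/*
 * Illustrative sketch only -- not part of this diff.
 * Assumes libspl's Solaris-compatible <atomic.h>.
 */
#include <stdio.h>
#include <stdint.h>
#include <atomic.h>	/* atomic_add_32_nv(), atomic_inc_32(), atomic_cas_32() */

static volatile uint32_t counter = 0;

int
main(void)
{
	/* Add and return the new value, per the atomic_add_32_nv entry above. */
	uint32_t v = atomic_add_32_nv(&counter, 5);

	/* Plain increment with no return value. */
	atomic_inc_32(&counter);

	/*
	 * Compare-and-swap: atomic_cas_32(target, expected, desired) returns
	 * the value observed before the swap; it succeeded iff old == expected.
	 */
	uint32_t old = atomic_cas_32(&counter, v + 1, 100);
	if (old == v + 1)
		(void) printf("CAS succeeded, counter is now 100\n");
	else
		(void) printf("CAS lost a race, saw %u\n", old);

	return (0);
}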
<function-decl name='atomic_cas_ulong' mangled-name='atomic_cas_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ulong'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-184' name='exp'/>
- <parameter type-id='type-id-184' name='des'/>
- <return type-id='type-id-184'/>
- </function-decl>
- <function-decl name='atomic_cas_64' mangled-name='atomic_cas_64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_64'>
- <parameter type-id='type-id-317' name='target'/>
- <parameter type-id='type-id-23' name='exp'/>
- <parameter type-id='type-id-23' name='des'/>
- <return type-id='type-id-23'/>
- </function-decl>
- <function-decl name='atomic_cas_ptr' mangled-name='atomic_cas_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ptr'>
- <parameter type-id='type-id-132' name='target'/>
- <parameter type-id='type-id-73' name='exp'/>
- <parameter type-id='type-id-73' name='des'/>
- <return type-id='type-id-73'/>
- </function-decl>
- <function-decl name='atomic_swap_8' mangled-name='atomic_swap_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_8'>
- <parameter type-id='type-id-302' name='target'/>
- <parameter type-id='type-id-32' name='bits'/>
- <return type-id='type-id-32'/>
- </function-decl>
- <function-decl name='atomic_swap_uchar' mangled-name='atomic_swap_uchar' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_uchar'>
- <parameter type-id='type-id-305' name='target'/>
- <parameter type-id='type-id-303' name='bits'/>
- <return type-id='type-id-303'/>
+ <parameter type-id='type-id-190' name='target'/>
+ <parameter type-id='type-id-143' name='exp'/>
+ <parameter type-id='type-id-143' name='des'/>
+ <return type-id='type-id-143'/>
</function-decl>
- <function-decl name='atomic_swap_16' mangled-name='atomic_swap_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_16'>
- <parameter type-id='type-id-307' name='target'/>
- <parameter type-id='type-id-224' name='bits'/>
- <return type-id='type-id-224'/>
- </function-decl>
- <function-decl name='atomic_swap_ushort' mangled-name='atomic_swap_ushort' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ushort'>
- <parameter type-id='type-id-309' name='target'/>
- <parameter type-id='type-id-227' name='bits'/>
- <return type-id='type-id-227'/>
- </function-decl>
- <function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_32'>
- <parameter type-id='type-id-311' name='target'/>
- <parameter type-id='type-id-22' name='bits'/>
- <return type-id='type-id-22'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='getexecname.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getexecname'>
+ <return type-id='type-id-4'/>
</function-decl>
- <function-decl name='atomic_swap_uint' mangled-name='atomic_swap_uint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_uint'>
- <parameter type-id='type-id-313' name='target'/>
- <parameter type-id='type-id-34' name='bits'/>
- <return type-id='type-id-34'/>
+ <function-decl name='getexecname_impl' mangled-name='getexecname_impl' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_swap_ulong' mangled-name='atomic_swap_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ulong'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-184' name='bits'/>
- <return type-id='type-id-184'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/gethostid.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
+ <return type-id='type-id-12'/>
</function-decl>
- <function-decl name='atomic_swap_64' mangled-name='atomic_swap_64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_64'>
- <parameter type-id='type-id-317' name='target'/>
- <parameter type-id='type-id-23' name='bits'/>
- <return type-id='type-id-23'/>
+ <function-decl name='fopen' mangled-name='fopen64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_swap_ptr' mangled-name='atomic_swap_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ptr'>
- <parameter type-id='type-id-132' name='target'/>
- <parameter type-id='type-id-73' name='bits'/>
- <return type-id='type-id-73'/>
+ <function-decl name='fscanf' mangled-name='fscanf' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_set_long_excl' mangled-name='atomic_set_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_set_long_excl'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-34' name='value'/>
- <return type-id='type-id-1'/>
+ <function-decl name='fclose' mangled-name='fclose' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='atomic_clear_long_excl' mangled-name='atomic_clear_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_clear_long_excl'>
- <parameter type-id='type-id-315' name='target'/>
- <parameter type-id='type-id-34' name='value'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/getmntany.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-207'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='mnt_special' type-id='type-id-36' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='mnt_mountp' type-id='type-id-36' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='mnt_fstype' type-id='type-id-36' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='mnt_mntopts' type-id='type-id-36' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='mnt_major' type-id='type-id-35' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='mnt_minor' type-id='type-id-35' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <pointer-type-def type-id='type-id-207' size-in-bits='64' id='type-id-208'/>
+ <class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='type-id-209'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='st_dev' type-id='type-id-210' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='st_ino' type-id='type-id-211' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='st_nlink' type-id='type-id-212' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='st_mode' type-id='type-id-213' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='st_uid' type-id='type-id-214' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='st_gid' type-id='type-id-215' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='__pad0' type-id='type-id-1' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='st_rdev' type-id='type-id-210' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='st_size' type-id='type-id-216' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='st_blksize' type-id='type-id-217' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='st_blocks' type-id='type-id-218' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='st_atim' type-id='type-id-219' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='st_mtim' type-id='type-id-219' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='st_ctim' type-id='type-id-219' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='960'>
+ <var-decl name='__glibc_reserved' type-id='type-id-220' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='__dev_t' type-id='type-id-12' id='type-id-210'/>
+ <typedef-decl name='__ino64_t' type-id='type-id-12' id='type-id-211'/>
+ <typedef-decl name='__nlink_t' type-id='type-id-12' id='type-id-212'/>
+ <typedef-decl name='__mode_t' type-id='type-id-10' id='type-id-213'/>
+ <typedef-decl name='__uid_t' type-id='type-id-10' id='type-id-214'/>
+ <typedef-decl name='__gid_t' type-id='type-id-10' id='type-id-215'/>
+ <typedef-decl name='__off_t' type-id='type-id-86' id='type-id-216'/>
+ <typedef-decl name='__blksize_t' type-id='type-id-86' id='type-id-217'/>
+ <typedef-decl name='__blkcnt64_t' type-id='type-id-86' id='type-id-218'/>
+ <class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-219'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='tv_sec' type-id='type-id-221' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='tv_nsec' type-id='type-id-222' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='__time_t' type-id='type-id-86' id='type-id-221'/>
+ <typedef-decl name='__syscall_slong_t' type-id='type-id-86' id='type-id-222'/>
+
+ <array-type-def dimensions='1' type-id='type-id-222' size-in-bits='192' id='type-id-220'>
+ <subrange length='3' type-id='type-id-12' id='type-id-75'/>
+
+ </array-type-def>
+ <pointer-type-def type-id='type-id-209' size-in-bits='64' id='type-id-223'/>
+ <function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
+ <parameter type-id='type-id-4' name='path'/>
+ <parameter type-id='type-id-208' name='entry'/>
+ <parameter type-id='type-id-223' name='statbuf'/>
<return type-id='type-id-1'/>
</function-decl>
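(The getextmntent entry above, together with the extmnttab and stat64 class-decls, describes libspl's Linux helper for looking up the mount entry backing a path. A hedged usage sketch follows, assuming the libspl <sys/mnttab.h> header and a glibc target where struct stat64 is visible under _GNU_SOURCE; illustrative only, not part of the recorded ABI.)

/* Illustrative sketch only -- not part of this diff. */
#define	_GNU_SOURCE		/* expose struct stat64 on glibc */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/mnttab.h>		/* libspl: struct extmnttab, getextmntent() */

int
main(int argc, char **argv)
{
	struct extmnttab entry;
	struct stat64 st;
	const char *path = (argc > 1) ? argv[1] : "/";

	/* Fill in the mount entry and stat64 for the filesystem under path. */
	if (getextmntent(path, &entry, &st) != 0) {
		(void) fprintf(stderr, "no mount entry found for %s\n", path);
		return (1);
	}

	(void) printf("%s is %s mounted at %s (dev %u/%u)\n",
	    path, entry.mnt_fstype, entry.mnt_mountp,
	    entry.mnt_major, entry.mnt_minor);
	return (0);
}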
- <function-decl name='membar_enter' mangled-name='membar_enter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_enter'>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='membar_exit' mangled-name='membar_exit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_exit'>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_producer'>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='membar_consumer' mangled-name='membar_consumer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_consumer'>
- <return type-id='type-id-17'/>
- </function-decl>
- </abi-instr>
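(The membar_*() entries being dropped above are libspl's memory-barrier shims, all declared as taking no arguments and returning void. A hedged sketch of the classic producer/consumer pairing they support, assuming the same libspl <atomic.h>; illustrative only, not part of the recorded ABI.)

/* Illustrative sketch only -- not part of this diff. */
#include <atomic.h>	/* libspl: membar_producer(), membar_consumer() */

static volatile int data;
static volatile int ready;

void
producer(int value)
{
	data = value;
	membar_producer();	/* order the data store before the flag store */
	ready = 1;
}

int
consumer(void)
{
	while (ready == 0)
		;		/* spin until the producer publishes */
	membar_consumer();	/* order the flag load before the data load */
	return (data);
}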
- <abi-instr version='1.0' address-size='64' path='getexecname.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getexecname'>
- <return type-id='type-id-16'/>
- </function-decl>
- <function-decl name='getexecname_impl' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <return type-id='type-id-5'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/gethostid.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
- <return type-id='type-id-26'/>
- </function-decl>
- <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-323'>
+ <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-224'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='_flags' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_IO_read_ptr' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_read_ptr' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_IO_read_end' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_read_end' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='_IO_read_base' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_read_base' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='_IO_write_base' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_write_base' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='_IO_write_ptr' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_write_ptr' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='_IO_write_end' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_write_end' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='_IO_buf_base' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_buf_base' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='_IO_buf_end' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_buf_end' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='_IO_save_base' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_save_base' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='_IO_backup_base' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_backup_base' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='_IO_save_end' type-id='type-id-37' visibility='default'/>
+ <var-decl name='_IO_save_end' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='_markers' type-id='type-id-324' visibility='default'/>
+ <var-decl name='_markers' type-id='type-id-225' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='_chain' type-id='type-id-325' visibility='default'/>
+ <var-decl name='_chain' type-id='type-id-226' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
<var-decl name='_fileno' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
<var-decl name='_flags2' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='_old_offset' type-id='type-id-326' visibility='default'/>
+ <var-decl name='_old_offset' type-id='type-id-216' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='_cur_column' type-id='type-id-200' visibility='default'/>
+ <var-decl name='_cur_column' type-id='type-id-195' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1040'>
- <var-decl name='_vtable_offset' type-id='type-id-318' visibility='default'/>
+ <var-decl name='_vtable_offset' type-id='type-id-204' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1048'>
- <var-decl name='_shortbuf' type-id='type-id-327' visibility='default'/>
+ <var-decl name='_shortbuf' type-id='type-id-227' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='_offset' type-id='type-id-135' visibility='default'/>
+ <var-decl name='_offset' type-id='type-id-228' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='_codecvt' type-id='type-id-328' visibility='default'/>
+ <var-decl name='__pad1' type-id='type-id-89' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='_wide_data' type-id='type-id-329' visibility='default'/>
+ <var-decl name='__pad2' type-id='type-id-89' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1344'>
- <var-decl name='_freeres_list' type-id='type-id-325' visibility='default'/>
+ <var-decl name='__pad3' type-id='type-id-89' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='_freeres_buf' type-id='type-id-73' visibility='default'/>
+ <var-decl name='__pad4' type-id='type-id-89' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1472'>
- <var-decl name='__pad5' type-id='type-id-125' visibility='default'/>
+ <var-decl name='__pad5' type-id='type-id-85' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1536'>
<var-decl name='_mode' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1568'>
- <var-decl name='_unused2' type-id='type-id-330' visibility='default'/>
+ <var-decl name='_unused2' type-id='type-id-229' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-230'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='_next' type-id='type-id-225' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='_sbuf' type-id='type-id-226' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='_pos' type-id='type-id-1' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-331'/>
- <pointer-type-def type-id='type-id-331' size-in-bits='64' id='type-id-324'/>
- <pointer-type-def type-id='type-id-323' size-in-bits='64' id='type-id-325'/>
- <typedef-decl name='__off_t' type-id='type-id-5' id='type-id-326'/>
+ <pointer-type-def type-id='type-id-230' size-in-bits='64' id='type-id-225'/>
+ <pointer-type-def type-id='type-id-224' size-in-bits='64' id='type-id-226'/>
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='8' id='type-id-327'>
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='8' id='type-id-227'>
<subrange length='1' type-id='type-id-12' id='type-id-231'/>
</array-type-def>
- <class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-332'/>
- <pointer-type-def type-id='type-id-332' size-in-bits='64' id='type-id-328'/>
- <class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-333'/>
- <pointer-type-def type-id='type-id-333' size-in-bits='64' id='type-id-329'/>
+ <typedef-decl name='__off64_t' type-id='type-id-86' id='type-id-228'/>
- <array-type-def dimensions='1' type-id='type-id-11' size-in-bits='160' id='type-id-330'>
- <subrange length='20' type-id='type-id-12' id='type-id-334'/>
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='160' id='type-id-229'>
+ <subrange length='20' type-id='type-id-12' id='type-id-232'/>
</array-type-def>
- <function-decl name='fclose' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-325'/>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/getmntany.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
- <typedef-decl name='FILE' type-id='type-id-323' id='type-id-335'/>
- <pointer-type-def type-id='type-id-335' size-in-bits='64' id='type-id-336'/>
- <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-337'>
+ <typedef-decl name='FILE' type-id='type-id-224' id='type-id-233'/>
+ <pointer-type-def type-id='type-id-233' size-in-bits='64' id='type-id-234'/>
+ <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-235'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-37' visibility='default'/>
+ <var-decl name='mnt_special' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-37' visibility='default'/>
+ <var-decl name='mnt_mountp' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-37' visibility='default'/>
+ <var-decl name='mnt_fstype' type-id='type-id-36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-37' visibility='default'/>
+ <var-decl name='mnt_mntopts' type-id='type-id-36' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-337' size-in-bits='64' id='type-id-338'/>
+ <pointer-type-def type-id='type-id-235' size-in-bits='64' id='type-id-236'/>
<function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getmntany'>
- <parameter type-id='type-id-336' name='fp'/>
- <parameter type-id='type-id-338' name='mgetp'/>
- <parameter type-id='type-id-338' name='mrefp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='mntent' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-339'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_fsname' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_dir' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_type' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_opts' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='mnt_freq' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='mnt_passno' type-id='type-id-1' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-339' size-in-bits='64' id='type-id-340'/>
- <function-decl name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-325'/>
- <parameter type-id='type-id-340'/>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-340'/>
- </function-decl>
- <function-decl name='feof' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-325'/>
+ <parameter type-id='type-id-234' name='fp'/>
+ <parameter type-id='type-id-236' name='mgetp'/>
+ <parameter type-id='type-id-236' name='mrefp'/>
<return type-id='type-id-1'/>
</function-decl>
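(The getmntany entry above is libspl's Solaris-style mount-table scan: it reads entries from fp and returns 0 at the first one matching every non-NULL field of mrefp, filling in mgetp. A hedged sketch, assuming libspl's <sys/mnttab.h> and the Linux mount table at /proc/self/mounts; illustrative only, not part of the recorded ABI.)

/* Illustrative sketch only -- not part of this diff. */
#include <stdio.h>
#include <sys/mnttab.h>		/* libspl: struct mnttab, getmntany() */

int
main(void)
{
	FILE *fp = fopen("/proc/self/mounts", "r");
	struct mnttab ref = { 0 };	/* NULL fields are wildcards */
	struct mnttab entry;

	if (fp == NULL)
		return (1);

	/* Match the first mount whose filesystem type is "zfs". */
	ref.mnt_fstype = "zfs";
	if (getmntany(fp, &entry, &ref) == 0)
		(void) printf("first zfs mount: %s on %s\n",
		    entry.mnt_special, entry.mnt_mountp);
	else
		(void) printf("no zfs mounts found\n");

	(void) fclose(fp);
	return (0);
}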
<function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='_sol_getmntent'>
- <parameter type-id='type-id-336' name='fp'/>
- <parameter type-id='type-id-338' name='mgetp'/>
+ <parameter type-id='type-id-234' name='fp'/>
+ <parameter type-id='type-id-236' name='mgetp'/>
<return type-id='type-id-1'/>
</function-decl>
- <class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-341'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='mnt_major' type-id='type-id-34' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='mnt_minor' type-id='type-id-34' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-341' size-in-bits='64' id='type-id-342'/>
- <class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='type-id-343'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='st_dev' type-id='type-id-344' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='st_ino' type-id='type-id-199' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='st_nlink' type-id='type-id-345' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='st_mode' type-id='type-id-346' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='st_uid' type-id='type-id-347' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='st_gid' type-id='type-id-348' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='__pad0' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='st_rdev' type-id='type-id-344' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='st_size' type-id='type-id-326' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='st_blksize' type-id='type-id-349' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='st_blocks' type-id='type-id-350' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='st_atim' type-id='type-id-238' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='st_mtim' type-id='type-id-238' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='st_ctim' type-id='type-id-238' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='__glibc_reserved' type-id='type-id-351' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__dev_t' type-id='type-id-26' id='type-id-344'/>
- <typedef-decl name='__nlink_t' type-id='type-id-26' id='type-id-345'/>
- <typedef-decl name='__mode_t' type-id='type-id-6' id='type-id-346'/>
- <typedef-decl name='__uid_t' type-id='type-id-6' id='type-id-347'/>
- <typedef-decl name='__gid_t' type-id='type-id-6' id='type-id-348'/>
- <typedef-decl name='__blksize_t' type-id='type-id-5' id='type-id-349'/>
- <typedef-decl name='__blkcnt64_t' type-id='type-id-5' id='type-id-350'/>
-
- <array-type-def dimensions='1' type-id='type-id-240' size-in-bits='192' id='type-id-351'>
- <subrange length='3' type-id='type-id-12' id='type-id-59'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-343' size-in-bits='64' id='type-id-352'/>
- <function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
- <parameter type-id='type-id-16' name='path'/>
- <parameter type-id='type-id-342' name='entry'/>
- <parameter type-id='type-id-352' name='statbuf'/>
- <return type-id='type-id-1'/>
+ <function-decl name='__builtin_fwrite' mangled-name='fwrite' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='feof' mangled-name='feof' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='getmntent_r' mangled-name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='list.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
- <class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-353'>
+ <abi-instr version='1.0' address-size='64' path='list.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-237'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='list_size' type-id='type-id-125' visibility='default'/>
+ <var-decl name='list_size' type-id='type-id-85' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='list_offset' type-id='type-id-125' visibility='default'/>
+ <var-decl name='list_offset' type-id='type-id-85' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='list_head' type-id='type-id-354' visibility='default'/>
+ <var-decl name='list_head' type-id='type-id-238' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-354'>
+ <class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-238'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='next' type-id='type-id-355' visibility='default'/>
+ <var-decl name='next' type-id='type-id-239' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='prev' type-id='type-id-355' visibility='default'/>
+ <var-decl name='prev' type-id='type-id-239' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-354' size-in-bits='64' id='type-id-355'/>
- <typedef-decl name='list_t' type-id='type-id-353' id='type-id-356'/>
- <pointer-type-def type-id='type-id-356' size-in-bits='64' id='type-id-357'/>
- <function-decl name='list_create' mangled-name='list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_create'>
- <parameter type-id='type-id-357' name='list'/>
- <parameter type-id='type-id-125' name='size'/>
- <parameter type-id='type-id-125' name='offset'/>
- <return type-id='type-id-17'/>
- </function-decl>
- <function-decl name='list_destroy' mangled-name='list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_destroy'>
- <parameter type-id='type-id-357' name='list'/>
- <return type-id='type-id-17'/>
+ <pointer-type-def type-id='type-id-238' size-in-bits='64' id='type-id-239'/>
+ <typedef-decl name='list_t' type-id='type-id-237' id='type-id-240'/>
+ <pointer-type-def type-id='type-id-240' size-in-bits='64' id='type-id-241'/>
+ <function-decl name='list_is_empty' mangled-name='list_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_is_empty'>
+ <parameter type-id='type-id-241' name='list'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='list_insert_after' mangled-name='list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_after'>
- <parameter type-id='type-id-357' name='list'/>
- <parameter type-id='type-id-73' name='object'/>
- <parameter type-id='type-id-73' name='nobject'/>
- <return type-id='type-id-17'/>
+ <typedef-decl name='list_node_t' type-id='type-id-238' id='type-id-242'/>
+ <pointer-type-def type-id='type-id-242' size-in-bits='64' id='type-id-243'/>
+ <function-decl name='list_link_active' mangled-name='list_link_active' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_active'>
+ <parameter type-id='type-id-243' name='ln'/>
+ <return type-id='type-id-1'/>
</function-decl>
- <function-decl name='list_insert_head' mangled-name='list_insert_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_head'>
- <parameter type-id='type-id-357' name='list'/>
- <parameter type-id='type-id-73' name='object'/>
- <return type-id='type-id-17'/>
+ <function-decl name='list_link_init' mangled-name='list_link_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_init'>
+ <parameter type-id='type-id-243' name='ln'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='list_insert_before' mangled-name='list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_before'>
- <parameter type-id='type-id-357' name='list'/>
- <parameter type-id='type-id-73' name='object'/>
- <parameter type-id='type-id-73' name='nobject'/>
- <return type-id='type-id-17'/>
+ <function-decl name='list_link_replace' mangled-name='list_link_replace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_replace'>
+ <parameter type-id='type-id-243' name='lold'/>
+ <parameter type-id='type-id-243' name='lnew'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='list_insert_tail' mangled-name='list_insert_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_tail'>
- <parameter type-id='type-id-357' name='list'/>
- <parameter type-id='type-id-73' name='object'/>
- <return type-id='type-id-17'/>
+ <function-decl name='list_move_tail' mangled-name='list_move_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_move_tail'>
+ <parameter type-id='type-id-241' name='dst'/>
+ <parameter type-id='type-id-241' name='src'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='list_remove' mangled-name='list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove'>
- <parameter type-id='type-id-357' name='list'/>
- <parameter type-id='type-id-73' name='object'/>
- <return type-id='type-id-17'/>
+ <function-decl name='list_prev' mangled-name='list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_prev'>
+ <parameter type-id='type-id-241' name='list'/>
+ <parameter type-id='type-id-89' name='object'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='list_remove_head' mangled-name='list_remove_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_head'>
- <parameter type-id='type-id-357' name='list'/>
- <return type-id='type-id-73'/>
+ <function-decl name='list_next' mangled-name='list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_next'>
+ <parameter type-id='type-id-241' name='list'/>
+ <parameter type-id='type-id-89' name='object'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='list_remove_tail' mangled-name='list_remove_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_tail'>
- <parameter type-id='type-id-357' name='list'/>
- <return type-id='type-id-73'/>
+ <function-decl name='list_tail' mangled-name='list_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_tail'>
+ <parameter type-id='type-id-241' name='list'/>
+ <return type-id='type-id-89'/>
</function-decl>
<function-decl name='list_head' mangled-name='list_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_head'>
- <parameter type-id='type-id-357' name='list'/>
- <return type-id='type-id-73'/>
+ <parameter type-id='type-id-241' name='list'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='list_tail' mangled-name='list_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_tail'>
- <parameter type-id='type-id-357' name='list'/>
- <return type-id='type-id-73'/>
+ <function-decl name='list_remove_tail' mangled-name='list_remove_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_tail'>
+ <parameter type-id='type-id-241' name='list'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='list_next' mangled-name='list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_next'>
- <parameter type-id='type-id-357' name='list'/>
- <parameter type-id='type-id-73' name='object'/>
- <return type-id='type-id-73'/>
+ <function-decl name='list_remove_head' mangled-name='list_remove_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_head'>
+ <parameter type-id='type-id-241' name='list'/>
+ <return type-id='type-id-89'/>
</function-decl>
- <function-decl name='list_prev' mangled-name='list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_prev'>
- <parameter type-id='type-id-357' name='list'/>
- <parameter type-id='type-id-73' name='object'/>
- <return type-id='type-id-73'/>
+ <function-decl name='list_remove' mangled-name='list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove'>
+ <parameter type-id='type-id-241' name='list'/>
+ <parameter type-id='type-id-89' name='object'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='list_move_tail' mangled-name='list_move_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_move_tail'>
- <parameter type-id='type-id-357' name='dst'/>
- <parameter type-id='type-id-357' name='src'/>
- <return type-id='type-id-17'/>
+ <function-decl name='list_insert_tail' mangled-name='list_insert_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_tail'>
+ <parameter type-id='type-id-241' name='list'/>
+ <parameter type-id='type-id-89' name='object'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <typedef-decl name='list_node_t' type-id='type-id-354' id='type-id-358'/>
- <pointer-type-def type-id='type-id-358' size-in-bits='64' id='type-id-359'/>
- <function-decl name='list_link_replace' mangled-name='list_link_replace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_replace'>
- <parameter type-id='type-id-359' name='lold'/>
- <parameter type-id='type-id-359' name='lnew'/>
- <return type-id='type-id-17'/>
+ <function-decl name='list_insert_head' mangled-name='list_insert_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_head'>
+ <parameter type-id='type-id-241' name='list'/>
+ <parameter type-id='type-id-89' name='object'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='list_link_init' mangled-name='list_link_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_init'>
- <parameter type-id='type-id-359' name='ln'/>
- <return type-id='type-id-17'/>
+ <function-decl name='list_insert_before' mangled-name='list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_before'>
+ <parameter type-id='type-id-241' name='list'/>
+ <parameter type-id='type-id-89' name='object'/>
+ <parameter type-id='type-id-89' name='nobject'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='list_link_active' mangled-name='list_link_active' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_active'>
- <parameter type-id='type-id-359' name='ln'/>
- <return type-id='type-id-1'/>
+ <function-decl name='list_insert_after' mangled-name='list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_after'>
+ <parameter type-id='type-id-241' name='list'/>
+ <parameter type-id='type-id-89' name='object'/>
+ <parameter type-id='type-id-89' name='nobject'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='list_is_empty' mangled-name='list_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_is_empty'>
- <parameter type-id='type-id-357' name='list'/>
- <return type-id='type-id-1'/>
+ <function-decl name='list_destroy' mangled-name='list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_destroy'>
+ <parameter type-id='type-id-241' name='list'/>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='list_create' mangled-name='list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_create'>
+ <parameter type-id='type-id-241' name='list'/>
+ <parameter type-id='type-id-85' name='size'/>
+ <parameter type-id='type-id-85' name='offset'/>
+ <return type-id='type-id-84'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='mkdirp.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
- <typedef-decl name='mode_t' type-id='type-id-346' id='type-id-360'/>
+ <abi-instr version='1.0' address-size='64' path='mkdirp.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <typedef-decl name='mode_t' type-id='type-id-213' id='type-id-244'/>
<function-decl name='mkdirp' mangled-name='mkdirp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mkdirp'>
- <parameter type-id='type-id-16' name='d'/>
- <parameter type-id='type-id-360' name='mode'/>
+ <parameter type-id='type-id-4' name='d'/>
+ <parameter type-id='type-id-244' name='mode'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='mbstowcs' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-129'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-26'/>
- </function-decl>
- <qualified-type-def type-id='type-id-1' const='yes' id='type-id-361'/>
- <pointer-type-def type-id='type-id-361' size-in-bits='64' id='type-id-362'/>
- <function-decl name='wcstombs' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-362'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-26'/>
- </function-decl>
- <function-decl name='mkdir' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-1'/>
+ <function-decl name='__mbstowcs_alias' mangled-name='mbstowcs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='__wcstombs_alias' mangled-name='wcstombs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='mkdir' mangled-name='mkdir' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='page.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='page.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
<function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
- <return type-id='type-id-125'/>
+ <return type-id='type-id-85'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='strlcat.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='strlcat.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
<function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcat'>
- <parameter type-id='type-id-37' name='dst'/>
- <parameter type-id='type-id-16' name='src'/>
- <parameter type-id='type-id-125' name='dstsize'/>
- <return type-id='type-id-125'/>
+ <parameter type-id='type-id-36' name='dst'/>
+ <parameter type-id='type-id-4' name='src'/>
+ <parameter type-id='type-id-85' name='dstsize'/>
+ <return type-id='type-id-85'/>
+ </function-decl>
+ <function-decl name='__builtin_memcpy' mangled-name='memcpy' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='strlcpy.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='strlcpy.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
<function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcpy'>
- <parameter type-id='type-id-37' name='dst'/>
- <parameter type-id='type-id-16' name='src'/>
- <parameter type-id='type-id-125' name='len'/>
- <return type-id='type-id-125'/>
+ <parameter type-id='type-id-36' name='dst'/>
+ <parameter type-id='type-id-4' name='src'/>
+ <parameter type-id='type-id-85' name='len'/>
+ <return type-id='type-id-85'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='timestamp.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='timestamp.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
<function-decl name='print_timestamp' mangled-name='print_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='print_timestamp'>
- <parameter type-id='type-id-34' name='timestamp_fmt'/>
- <return type-id='type-id-17'/>
+ <parameter type-id='type-id-35' name='timestamp_fmt'/>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='localtime' mangled-name='localtime' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='strftime' mangled-name='strftime' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='time' mangled-name='time' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <pointer-type-def type-id='type-id-5' size-in-bits='64' id='type-id-363'/>
- <function-decl name='time' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-363'/>
- <return type-id='type-id-5'/>
+ <function-decl name='nl_langinfo' mangled-name='nl_langinfo' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='nl_langinfo' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-37'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/zone.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <typedef-decl name='zoneid_t' type-id='type-id-1' id='type-id-245'/>
+ <function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
+ <return type-id='type-id-245'/>
</function-decl>
- <class-decl name='tm' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-364'>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='rdwr_efi.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libefi' language='LANG_C99'>
+ <var-decl name='efi_debug' type-id='type-id-1' mangled-name='efi_debug' visibility='default' elf-symbol-id='efi_debug'/>
+ <class-decl name='dk_gpt' size-in-bits='1920' is-struct='yes' visibility='default' id='type-id-246'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tm_sec' type-id='type-id-1' visibility='default'/>
+ <var-decl name='efi_version' type-id='type-id-35' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='tm_min' type-id='type-id-1' visibility='default'/>
+ <var-decl name='efi_nparts' type-id='type-id-35' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tm_hour' type-id='type-id-1' visibility='default'/>
+ <var-decl name='efi_part_size' type-id='type-id-35' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='tm_mday' type-id='type-id-1' visibility='default'/>
+ <var-decl name='efi_lbasize' type-id='type-id-35' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='tm_mon' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='tm_year' type-id='type-id-1' visibility='default'/>
+ <var-decl name='efi_last_lba' type-id='type-id-247' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='tm_wday' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='tm_yday' type-id='type-id-1' visibility='default'/>
+ <var-decl name='efi_first_u_lba' type-id='type-id-247' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='tm_isdst' type-id='type-id-1' visibility='default'/>
+ <var-decl name='efi_last_u_lba' type-id='type-id-247' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='tm_gmtoff' type-id='type-id-5' visibility='default'/>
+ <var-decl name='efi_disk_uguid' type-id='type-id-248' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='tm_zone' type-id='type-id-16' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='efi_flags' type-id='type-id-35' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='480'>
+ <var-decl name='efi_reserved1' type-id='type-id-35' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='efi_altern_lba' type-id='type-id-247' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='efi_reserved' type-id='type-id-249' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='960'>
+ <var-decl name='efi_parts' type-id='type-id-250' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-364' size-in-bits='64' id='type-id-365'/>
- <qualified-type-def type-id='type-id-5' const='yes' id='type-id-366'/>
- <pointer-type-def type-id='type-id-366' size-in-bits='64' id='type-id-367'/>
- <function-decl name='localtime' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-367'/>
- <return type-id='type-id-365'/>
- </function-decl>
- <qualified-type-def type-id='type-id-364' const='yes' id='type-id-368'/>
- <pointer-type-def type-id='type-id-368' size-in-bits='64' id='type-id-369'/>
- <function-decl name='strftime' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-37'/>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-16'/>
- <parameter type-id='type-id-369'/>
- <return type-id='type-id-26'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/zone.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libspl' language='LANG_C99'>
- <typedef-decl name='zoneid_t' type-id='type-id-1' id='type-id-370'/>
- <function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
- <return type-id='type-id-370'/>
+ <typedef-decl name='longlong_t' type-id='type-id-172' id='type-id-251'/>
+ <typedef-decl name='diskaddr_t' type-id='type-id-251' id='type-id-247'/>
+ <class-decl name='uuid' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-248'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='time_low' type-id='type-id-7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='time_mid' type-id='type-id-197' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='48'>
+ <var-decl name='time_hi_and_version' type-id='type-id-197' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='clock_seq_hi_and_reserved' type-id='type-id-33' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='72'>
+ <var-decl name='clock_seq_low' type-id='type-id-33' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='80'>
+ <var-decl name='node_addr' type-id='type-id-72' visibility='default'/>
+ </data-member>
+ </class-decl>
+
+ <array-type-def dimensions='1' type-id='type-id-35' size-in-bits='384' id='type-id-249'>
+ <subrange length='12' type-id='type-id-12' id='type-id-70'/>
+
+ </array-type-def>
+ <class-decl name='dk_part' size-in-bits='960' is-struct='yes' visibility='default' id='type-id-252'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='p_start' type-id='type-id-247' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='p_size' type-id='type-id-247' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='p_guid' type-id='type-id-248' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='p_tag' type-id='type-id-253' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='272'>
+ <var-decl name='p_flag' type-id='type-id-253' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='p_name' type-id='type-id-254' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='p_uguid' type-id='type-id-248' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='p_resv' type-id='type-id-255' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='ushort_t' type-id='type-id-195' id='type-id-253'/>
+
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='288' id='type-id-254'>
+ <subrange length='36' type-id='type-id-12' id='type-id-256'/>
+
+ </array-type-def>
+
+ <array-type-def dimensions='1' type-id='type-id-35' size-in-bits='256' id='type-id-255'>
+ <subrange length='8' type-id='type-id-12' id='type-id-69'/>
+
+ </array-type-def>
+
+ <array-type-def dimensions='1' type-id='type-id-252' size-in-bits='960' id='type-id-250'>
+ <subrange length='1' type-id='type-id-12' id='type-id-231'/>
+
+ </array-type-def>
+ <pointer-type-def type-id='type-id-246' size-in-bits='64' id='type-id-257'/>
+ <function-decl name='efi_err_check' mangled-name='efi_err_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_err_check'>
+ <parameter type-id='type-id-257' name='vtoc'/>
+ <return type-id='type-id-84'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='rdwr_efi.c' comp-dir-path='/home/nabijaczleweli/store/code/zfs/lib/libefi' language='LANG_C99'>
- <var-decl name='efi_debug' type-id='type-id-1' mangled-name='efi_debug' visibility='default' elf-symbol-id='efi_debug'/>
- <function-decl name='efi_alloc_and_init' mangled-name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_init'>
+ <function-decl name='efi_type' mangled-name='efi_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_type'>
<parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-22' name='nparts'/>
- <parameter type-id='type-id-233' name='vtoc'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='uuid_generate' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-36'/>
- <return type-id='type-id-17'/>
+ <function-decl name='efi_free' mangled-name='efi_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_free'>
+ <parameter type-id='type-id-257' name='ptr'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='efi_alloc_and_read' mangled-name='efi_alloc_and_read' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_read'>
+ <function-decl name='efi_write' mangled-name='efi_write' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_write'>
<parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-233' name='vtoc'/>
+ <parameter type-id='type-id-257' name='vtoc'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='efi_rescan' mangled-name='efi_rescan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_rescan'>
+ <function-decl name='efi_use_whole_disk' mangled-name='efi_use_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_use_whole_disk'>
<parameter type-id='type-id-1' name='fd'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='efi_use_whole_disk' mangled-name='efi_use_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_use_whole_disk'>
+ <function-decl name='efi_rescan' mangled-name='efi_rescan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_rescan'>
<parameter type-id='type-id-1' name='fd'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='efi_write' mangled-name='efi_write' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_write'>
+ <pointer-type-def type-id='type-id-257' size-in-bits='64' id='type-id-258'/>
+ <function-decl name='efi_alloc_and_read' mangled-name='efi_alloc_and_read' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_read'>
<parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-232' name='vtoc'/>
+ <parameter type-id='type-id-258' name='vtoc'/>
<return type-id='type-id-1'/>
</function-decl>
- <qualified-type-def type-id='type-id-30' const='yes' id='type-id-371'/>
- <pointer-type-def type-id='type-id-371' size-in-bits='64' id='type-id-372'/>
- <function-decl name='uuid_is_null' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-372'/>
+ <function-decl name='efi_alloc_and_init' mangled-name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_init'>
+ <parameter type-id='type-id-1' name='fd'/>
+ <parameter type-id='type-id-7' name='nparts'/>
+ <parameter type-id='type-id-258' name='vtoc'/>
<return type-id='type-id-1'/>
</function-decl>
- <function-decl name='crc32' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-26'/>
- <parameter type-id='type-id-372'/>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-26'/>
+ <function-decl name='uuid_is_null' mangled-name='uuid_is_null' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='uuid_generate' mangled-name='uuid_generate' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='bcmp' mangled-name='bcmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
<function-decl name='lseek' mangled-name='lseek64' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-5'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='write' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <parameter type-id='type-id-73'/>
- <parameter type-id='type-id-26'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='fsync' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-1'/>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='efi_type' mangled-name='efi_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_type'>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-1'/>
+ <function-decl name='write' mangled-name='write' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
- <function-decl name='efi_err_check' mangled-name='efi_err_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_err_check'>
- <parameter type-id='type-id-232' name='vtoc'/>
- <return type-id='type-id-17'/>
+ <function-decl name='fsync' mangled-name='fsync' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
+ </function-decl>
+ <function-decl name='crc32' mangled-name='crc32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-84'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.abi b/sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.abi
index 8ef242d2f5ac..04bce74d8c00 100644
--- a/sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.abi
+++ b/sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.abi
@@ -1,212 +1,361 @@
-<abi-corpus path='libzfsbootenv.so' architecture='elf-amd-x86_64' soname='libzfsbootenv.so.1'>
+<abi-corpus architecture='elf-amd-x86_64' soname='libzfsbootenv.so.1'>
<elf-needed>
<dependency name='libzfs.so.4'/>
- <dependency name='libzfs_core.so.3'/>
- <dependency name='libuuid.so.1'/>
- <dependency name='libblkid.so.1'/>
- <dependency name='libudev.so.1'/>
- <dependency name='libuutil.so.3'/>
- <dependency name='libm.so.6'/>
- <dependency name='libcrypto.so.1.1'/>
- <dependency name='libz.so.1'/>
<dependency name='libnvpair.so.3'/>
- <dependency name='libtirpc.so.3'/>
- <dependency name='libpthread.so.0'/>
<dependency name='libc.so.6'/>
</elf-needed>
<elf-function-symbols>
+ <elf-symbol name='_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_add_pair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_bootenv_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_get_boot_device' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_nvlist_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_nvlist_get' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_nvlist_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_remove_pair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_set_boot_device' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
- <abi-instr version='1.0' address-size='64' path='lzbe_device.c' comp-dir-path='/home/fedora/zfs/lib/libzfsbootenv' language='LANG_C99'>
- <type-decl name='char' size-in-bits='8' id='type-id-1'/>
- <type-decl name='int' size-in-bits='32' id='type-id-2'/>
- <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-3'/>
- <typedef-decl name='lzbe_flags_t' type-id='type-id-4' filepath='../../include/libzfsbootenv.h' line='26' column='1' id='type-id-5'/>
- <enum-decl name='lzbe_flags' filepath='../../include/libzfsbootenv.h' line='23' column='1' id='type-id-4'>
- <underlying-type type-id='type-id-3'/>
+ <abi-instr version='1.0' address-size='64' path='lzbe_device.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfsbootenv' language='LANG_C99'>
+ <type-decl name='int' size-in-bits='32' id='type-id-1'/>
+ <type-decl name='char' size-in-bits='8' id='type-id-2'/>
+ <qualified-type-def type-id='type-id-2' const='yes' id='type-id-3'/>
+ <pointer-type-def type-id='type-id-3' size-in-bits='64' id='type-id-4'/>
+ <pointer-type-def type-id='type-id-2' size-in-bits='64' id='type-id-5'/>
+ <pointer-type-def type-id='type-id-5' size-in-bits='64' id='type-id-6'/>
+ <function-decl name='lzbe_get_boot_device' mangled-name='lzbe_get_boot_device' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_get_boot_device'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-6' name='device'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-7'/>
+ <enum-decl name='lzbe_flags' id='type-id-8'>
+ <underlying-type type-id='type-id-7'/>
<enumerator name='lzbe_add' value='0'/>
<enumerator name='lzbe_replace' value='1'/>
</enum-decl>
- <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-6'/>
- <pointer-type-def type-id='type-id-6' size-in-bits='64' id='type-id-7'/>
- <qualified-type-def type-id='type-id-1' const='yes' id='type-id-8'/>
- <pointer-type-def type-id='type-id-8' size-in-bits='64' id='type-id-9'/>
- <function-decl name='lzbe_get_boot_device' mangled-name='lzbe_get_boot_device' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_device.c' line='114' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_get_boot_device'>
- <parameter type-id='type-id-9' name='pool' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_device.c' line='114' column='1'/>
- <parameter type-id='type-id-7' name='device' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_device.c' line='114' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='lzbe_set_boot_device' mangled-name='lzbe_set_boot_device' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_device.c' line='28' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_set_boot_device'>
- <parameter type-id='type-id-9' name='pool' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_device.c' line='28' column='1'/>
- <parameter type-id='type-id-5' name='flag' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_device.c' line='28' column='1'/>
- <parameter type-id='type-id-9' name='device' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_device.c' line='28' column='1'/>
- <return type-id='type-id-2'/>
+ <typedef-decl name='lzbe_flags_t' type-id='type-id-8' id='type-id-9'/>
+ <function-decl name='lzbe_set_boot_device' mangled-name='lzbe_set_boot_device' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_set_boot_device'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-9' name='flag'/>
+ <parameter type-id='type-id-4' name='device'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <type-decl name='void' id='type-id-10'/>
+ <function-decl name='libzfs_init' mangled-name='libzfs_init' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='zpool_get_bootenv' mangled-name='zpool_get_bootenv' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='libzfs_fini' mangled-name='libzfs_fini' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='strdup' mangled-name='strdup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='__stack_chk_fail' mangled-name='__stack_chk_fail' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='__asprintf_chk' mangled-name='__asprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='__fprintf_chk' mangled-name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='fnvlist_alloc' mangled-name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_uint64' mangled-name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_exists' mangled-name='nvlist_exists' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='zpool_set_bootenv' mangled-name='zpool_set_bootenv' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='fnvlist_free' mangled-name='fnvlist_free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_string' mangled-name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='free' mangled-name='free' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='libzfs_error_description' mangled-name='libzfs_error_description' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='fnvlist_remove' mangled-name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='lzbe_pair.c' comp-dir-path='/home/fedora/zfs/lib/libzfsbootenv' language='LANG_C99'>
- <type-decl name='unsigned long int' size-in-bits='64' id='type-id-10'/>
- <type-decl name='void' id='type-id-11'/>
- <typedef-decl name='size_t' type-id='type-id-10' filepath='/usr/lib/gcc/x86_64-redhat-linux/10/include/stddef.h' line='209' column='1' id='type-id-12'/>
- <pointer-type-def type-id='type-id-11' size-in-bits='64' id='type-id-13'/>
- <pointer-type-def type-id='type-id-13' size-in-bits='64' id='type-id-14'/>
- <function-decl name='lzbe_remove_pair' mangled-name='lzbe_remove_pair' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='343' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_remove_pair'>
- <parameter type-id='type-id-13' name='ptr' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='343' column='1'/>
- <parameter type-id='type-id-9' name='key' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='343' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='lzbe_add_pair' mangled-name='lzbe_add_pair' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='182' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_add_pair'>
- <parameter type-id='type-id-13' name='ptr' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='182' column='1'/>
- <parameter type-id='type-id-9' name='key' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='182' column='1'/>
- <parameter type-id='type-id-9' name='type' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='182' column='1'/>
- <parameter type-id='type-id-13' name='value' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='182' column='1'/>
- <parameter type-id='type-id-12' name='size' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='183' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='lzbe_nvlist_free' mangled-name='lzbe_nvlist_free' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='131' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_nvlist_free'>
- <parameter type-id='type-id-13' name='ptr' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='131' column='1'/>
- <return type-id='type-id-11'/>
- </function-decl>
- <function-decl name='lzbe_nvlist_set' mangled-name='lzbe_nvlist_set' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='74' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_nvlist_set'>
- <parameter type-id='type-id-9' name='pool' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='74' column='1'/>
- <parameter type-id='type-id-9' name='key' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='74' column='1'/>
- <parameter type-id='type-id-13' name='ptr' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='74' column='1'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='lzbe_nvlist_get' mangled-name='lzbe_nvlist_get' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='27' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_nvlist_get'>
- <parameter type-id='type-id-9' name='pool' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='27' column='1'/>
- <parameter type-id='type-id-9' name='key' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='27' column='1'/>
- <parameter type-id='type-id-14' name='ptr' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_pair.c' line='27' column='1'/>
- <return type-id='type-id-2'/>
+ <abi-instr version='1.0' address-size='64' path='lzbe_pair.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfsbootenv' language='LANG_C99'>
+ <pointer-type-def type-id='type-id-10' size-in-bits='64' id='type-id-11'/>
+ <function-decl name='lzbe_remove_pair' mangled-name='lzbe_remove_pair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_remove_pair'>
+ <parameter type-id='type-id-11' name='ptr'/>
+ <parameter type-id='type-id-4' name='key'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <type-decl name='unsigned long int' size-in-bits='64' id='type-id-12'/>
+ <typedef-decl name='size_t' type-id='type-id-12' id='type-id-13'/>
+ <function-decl name='lzbe_add_pair' mangled-name='lzbe_add_pair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_add_pair'>
+ <parameter type-id='type-id-11' name='ptr'/>
+ <parameter type-id='type-id-4' name='key'/>
+ <parameter type-id='type-id-4' name='type'/>
+ <parameter type-id='type-id-11' name='value'/>
+ <parameter type-id='type-id-13' name='size'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzbe_nvlist_free' mangled-name='lzbe_nvlist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_nvlist_free'>
+ <parameter type-id='type-id-11' name='ptr'/>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='lzbe_nvlist_set' mangled-name='lzbe_nvlist_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_nvlist_set'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-4' name='key'/>
+ <parameter type-id='type-id-11' name='ptr'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <pointer-type-def type-id='type-id-11' size-in-bits='64' id='type-id-14'/>
+ <function-decl name='lzbe_nvlist_get' mangled-name='lzbe_nvlist_get' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_nvlist_get'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-4' name='key'/>
+ <parameter type-id='type-id-14' name='ptr'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='strcmp' mangled-name='strcmp' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint8_array' mangled-name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_byte' mangled-name='nvlist_add_byte' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int16' mangled-name='nvlist_add_int16' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint16' mangled-name='nvlist_add_uint16' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int32' mangled-name='nvlist_add_int32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint32' mangled-name='nvlist_add_uint32' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int64' mangled-name='nvlist_add_int64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_byte_array' mangled-name='nvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int16_array' mangled-name='nvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint16_array' mangled-name='nvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int32_array' mangled-name='nvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint32_array' mangled-name='nvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int64_array' mangled-name='nvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_string_array' mangled-name='nvlist_add_string_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_boolean_value' mangled-name='nvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int8' mangled-name='nvlist_add_int8' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint8' mangled-name='nvlist_add_uint8' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_boolean_array' mangled-name='nvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int8_array' mangled-name='nvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='lzbe_util.c' comp-dir-path='/home/fedora/zfs/lib/libzfsbootenv' language='LANG_C99'>
-
-
- <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='8' id='type-id-15'>
- <subrange length='1' type-id='type-id-10' id='type-id-16'/>
-
- </array-type-def>
- <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='160' id='type-id-17'>
- <subrange length='20' type-id='type-id-10' id='type-id-18'/>
-
- </array-type-def>
- <class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-19'/>
- <class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-20'/>
- <class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-21'/>
- <type-decl name='long int' size-in-bits='64' id='type-id-22'/>
- <type-decl name='signed char' size-in-bits='8' id='type-id-23'/>
- <type-decl name='unsigned short int' size-in-bits='16' id='type-id-24'/>
- <typedef-decl name='FILE' type-id='type-id-25' filepath='/usr/include/bits/types/FILE.h' line='7' column='1' id='type-id-26'/>
- <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='49' column='1' id='type-id-25'>
+ <abi-instr version='1.0' address-size='64' path='lzbe_util.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfsbootenv' language='LANG_C99'>
+ <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-15'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_flags' type-id='type-id-2' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='51' column='1'/>
+ <var-decl name='_flags' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_IO_read_ptr' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='54' column='1'/>
+ <var-decl name='_IO_read_ptr' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_IO_read_end' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='55' column='1'/>
+ <var-decl name='_IO_read_end' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='_IO_read_base' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='56' column='1'/>
+ <var-decl name='_IO_read_base' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='_IO_write_base' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='57' column='1'/>
+ <var-decl name='_IO_write_base' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='_IO_write_ptr' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='58' column='1'/>
+ <var-decl name='_IO_write_ptr' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='_IO_write_end' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='59' column='1'/>
+ <var-decl name='_IO_write_end' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='_IO_buf_base' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='60' column='1'/>
+ <var-decl name='_IO_buf_base' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='_IO_buf_end' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='61' column='1'/>
+ <var-decl name='_IO_buf_end' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='_IO_save_base' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='64' column='1'/>
+ <var-decl name='_IO_save_base' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='_IO_backup_base' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='65' column='1'/>
+ <var-decl name='_IO_backup_base' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='_IO_save_end' type-id='type-id-6' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='66' column='1'/>
+ <var-decl name='_IO_save_end' type-id='type-id-5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='_markers' type-id='type-id-27' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='68' column='1'/>
+ <var-decl name='_markers' type-id='type-id-16' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='_chain' type-id='type-id-28' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='70' column='1'/>
+ <var-decl name='_chain' type-id='type-id-17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='_fileno' type-id='type-id-2' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='72' column='1'/>
+ <var-decl name='_fileno' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='_flags2' type-id='type-id-2' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='73' column='1'/>
+ <var-decl name='_flags2' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='_old_offset' type-id='type-id-29' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='74' column='1'/>
+ <var-decl name='_old_offset' type-id='type-id-18' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='_cur_column' type-id='type-id-24' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='77' column='1'/>
+ <var-decl name='_cur_column' type-id='type-id-19' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1040'>
- <var-decl name='_vtable_offset' type-id='type-id-23' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='78' column='1'/>
+ <var-decl name='_vtable_offset' type-id='type-id-20' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1048'>
- <var-decl name='_shortbuf' type-id='type-id-15' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='79' column='1'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1088'>
- <var-decl name='_lock' type-id='type-id-30' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='81' column='1'/>
+ <var-decl name='_shortbuf' type-id='type-id-21' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='_offset' type-id='type-id-31' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='89' column='1'/>
+ <var-decl name='_offset' type-id='type-id-22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='_codecvt' type-id='type-id-32' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='91' column='1'/>
+ <var-decl name='__pad1' type-id='type-id-11' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='_wide_data' type-id='type-id-33' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='92' column='1'/>
+ <var-decl name='__pad2' type-id='type-id-11' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1344'>
- <var-decl name='_freeres_list' type-id='type-id-28' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='93' column='1'/>
+ <var-decl name='__pad3' type-id='type-id-11' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='_freeres_buf' type-id='type-id-13' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='94' column='1'/>
+ <var-decl name='__pad4' type-id='type-id-11' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1472'>
- <var-decl name='__pad5' type-id='type-id-12' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='95' column='1'/>
+ <var-decl name='__pad5' type-id='type-id-13' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1536'>
- <var-decl name='_mode' type-id='type-id-2' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='96' column='1'/>
+ <var-decl name='_mode' type-id='type-id-1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1568'>
- <var-decl name='_unused2' type-id='type-id-17' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='98' column='1'/>
+ <var-decl name='_unused2' type-id='type-id-23' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__off_t' type-id='type-id-22' filepath='/usr/include/bits/types.h' line='152' column='1' id='type-id-29'/>
- <typedef-decl name='_IO_lock_t' type-id='type-id-11' filepath='/usr/include/bits/types/struct_FILE.h' line='43' column='1' id='type-id-34'/>
- <typedef-decl name='__off64_t' type-id='type-id-22' filepath='/usr/include/bits/types.h' line='153' column='1' id='type-id-31'/>
- <pointer-type-def type-id='type-id-26' size-in-bits='64' id='type-id-35'/>
- <pointer-type-def type-id='type-id-25' size-in-bits='64' id='type-id-28'/>
- <pointer-type-def type-id='type-id-19' size-in-bits='64' id='type-id-32'/>
- <pointer-type-def type-id='type-id-34' size-in-bits='64' id='type-id-30'/>
- <pointer-type-def type-id='type-id-20' size-in-bits='64' id='type-id-27'/>
- <pointer-type-def type-id='type-id-21' size-in-bits='64' id='type-id-33'/>
- <function-decl name='lzbe_bootenv_print' mangled-name='lzbe_bootenv_print' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_util.c' line='24' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_bootenv_print'>
- <parameter type-id='type-id-9' name='pool' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_util.c' line='24' column='1'/>
- <parameter type-id='type-id-9' name='nvlist' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_util.c' line='24' column='1'/>
- <parameter type-id='type-id-35' name='of' filepath='/home/fedora/zfs/lib/libzfsbootenv/lzbe_util.c' line='24' column='1'/>
- <return type-id='type-id-2'/>
+ <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-24'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='_next' type-id='type-id-16' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='_sbuf' type-id='type-id-17' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='_pos' type-id='type-id-1' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <pointer-type-def type-id='type-id-24' size-in-bits='64' id='type-id-16'/>
+ <pointer-type-def type-id='type-id-15' size-in-bits='64' id='type-id-17'/>
+ <type-decl name='long int' size-in-bits='64' id='type-id-25'/>
+ <typedef-decl name='__off_t' type-id='type-id-25' id='type-id-18'/>
+ <type-decl name='unsigned short int' size-in-bits='16' id='type-id-19'/>
+ <type-decl name='signed char' size-in-bits='8' id='type-id-20'/>
+
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='8' id='type-id-21'>
+ <subrange length='1' type-id='type-id-12' id='type-id-26'/>
+
+ </array-type-def>
+ <typedef-decl name='__off64_t' type-id='type-id-25' id='type-id-22'/>
+
+ <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='160' id='type-id-23'>
+ <subrange length='20' type-id='type-id-12' id='type-id-27'/>
+
+ </array-type-def>
+ <typedef-decl name='FILE' type-id='type-id-15' id='type-id-28'/>
+ <pointer-type-def type-id='type-id-28' size-in-bits='64' id='type-id-29'/>
+ <function-decl name='lzbe_bootenv_print' mangled-name='lzbe_bootenv_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_bootenv_print'>
+ <parameter type-id='type-id-4' name='pool'/>
+ <parameter type-id='type-id-4' name='nvlist'/>
+ <parameter type-id='type-id-29' name='of'/>
+ <return type-id='type-id-1'/>
+ </function-decl>
+ <function-decl name='lzbe_nvlist_get' mangled-name='lzbe_nvlist_get' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
+ </function-decl>
+ <function-decl name='nvlist_print' mangled-name='nvlist_print' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-10'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libzpool/kernel.c b/sys/contrib/openzfs/lib/libzpool/kernel.c
index 25f58f156bf9..ef75706fa6e3 100644
--- a/sys/contrib/openzfs/lib/libzpool/kernel.c
+++ b/sys/contrib/openzfs/lib/libzpool/kernel.c
@@ -1,1377 +1,1376 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
*/
#include <assert.h>
#include <fcntl.h>
#include <libgen.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libzutil.h>
#include <sys/crypto/icp.h>
#include <sys/processor.h>
#include <sys/rrwlock.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/time.h>
#include <sys/utsname.h>
#include <sys/zfs_context.h>
#include <sys/zfs_onexit.h>
#include <sys/zfs_vfsops.h>
#include <sys/zstd/zstd.h>
#include <sys/zvol.h>
#include <zfs_fletcher.h>
#include <zlib.h>
/*
* Emulation of kernel services in userland.
*/
uint64_t physmem;
char hw_serial[HW_HOSTID_LEN];
struct utsname hw_utsname;
/* If set, all blocks read will be copied to the specified directory. */
char *vn_dumpdir = NULL;
/* this only exists to have its address taken */
struct proc p0;
/*
* =========================================================================
* threads
* =========================================================================
*
* TS_STACK_MIN is dictated by the minimum allowed pthread stack size. While
* TS_STACK_MAX is somewhat arbitrary, it was selected to be large enough for
* the expected stack depth while small enough to avoid exhausting address
* space with high thread counts.
*/
#define TS_STACK_MIN MAX(PTHREAD_STACK_MIN, 32768)
#define TS_STACK_MAX (256 * 1024)
/*ARGSUSED*/
kthread_t *
zk_thread_create(void (*func)(void *), void *arg, size_t stksize, int state)
{
pthread_attr_t attr;
pthread_t tid;
char *stkstr;
int detachstate = PTHREAD_CREATE_DETACHED;
VERIFY0(pthread_attr_init(&attr));
if (state & TS_JOINABLE)
detachstate = PTHREAD_CREATE_JOINABLE;
VERIFY0(pthread_attr_setdetachstate(&attr, detachstate));
/*
* We allow the default stack size in user space to be specified by
* setting the ZFS_STACK_SIZE environment variable. This allows us
* the convenience of observing and debugging stack overruns in
* user space. Explicitly specified stack sizes will be honored.
* The usage of ZFS_STACK_SIZE is discussed further in the
* ENVIRONMENT VARIABLES sections of the ztest(1) man page.
*/
if (stksize == 0) {
stkstr = getenv("ZFS_STACK_SIZE");
if (stkstr == NULL)
stksize = TS_STACK_MAX;
else
stksize = MAX(atoi(stkstr), TS_STACK_MIN);
}
VERIFY3S(stksize, >, 0);
stksize = P2ROUNDUP(MAX(stksize, TS_STACK_MIN), PAGESIZE);
/*
* If this ever fails, it may be because the stack size is not a
* multiple of system page size.
*/
VERIFY0(pthread_attr_setstacksize(&attr, stksize));
VERIFY0(pthread_attr_setguardsize(&attr, PAGESIZE));
VERIFY0(pthread_create(&tid, &attr, (void *(*)(void *))func, arg));
VERIFY0(pthread_attr_destroy(&attr));
return ((void *)(uintptr_t)tid);
}
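/*
 * Editor's illustrative sketch (not part of this change): a minimal
 * zk_thread_create() caller.  example_worker()/example_spawn() are
 * hypothetical names; passing stksize == 0 defers the stack size to
 * ZFS_STACK_SIZE or, when unset, to TS_STACK_MAX as described above.
 */
static void
example_worker(void *arg)
{
	(void) arg;	/* placeholder body */
}

static kthread_t *
example_spawn(void)
{
	/* TS_RUN: detached; OR in TS_JOINABLE if the caller will join */
	return (zk_thread_create(example_worker, NULL, 0, TS_RUN));
}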
/*
* =========================================================================
* kstats
* =========================================================================
*/
/*ARGSUSED*/
kstat_t *
kstat_create(const char *module, int instance, const char *name,
const char *class, uchar_t type, ulong_t ndata, uchar_t ks_flag)
{
return (NULL);
}
/*ARGSUSED*/
void
kstat_install(kstat_t *ksp)
{}
/*ARGSUSED*/
void
kstat_delete(kstat_t *ksp)
{}
void
kstat_set_raw_ops(kstat_t *ksp,
int (*headers)(char *buf, size_t size),
int (*data)(char *buf, size_t size, void *data),
void *(*addr)(kstat_t *ksp, loff_t index))
{}
/*
* =========================================================================
* mutexes
* =========================================================================
*/
void
mutex_init(kmutex_t *mp, char *name, int type, void *cookie)
{
VERIFY0(pthread_mutex_init(&mp->m_lock, NULL));
memset(&mp->m_owner, 0, sizeof (pthread_t));
}
void
mutex_destroy(kmutex_t *mp)
{
VERIFY0(pthread_mutex_destroy(&mp->m_lock));
}
void
mutex_enter(kmutex_t *mp)
{
VERIFY0(pthread_mutex_lock(&mp->m_lock));
mp->m_owner = pthread_self();
}
int
mutex_tryenter(kmutex_t *mp)
{
int error;
error = pthread_mutex_trylock(&mp->m_lock);
if (error == 0) {
mp->m_owner = pthread_self();
return (1);
} else {
VERIFY3S(error, ==, EBUSY);
return (0);
}
}
void
mutex_exit(kmutex_t *mp)
{
memset(&mp->m_owner, 0, sizeof (pthread_t));
VERIFY0(pthread_mutex_unlock(&mp->m_lock));
}
/*
* =========================================================================
* rwlocks
* =========================================================================
*/
void
rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
{
VERIFY0(pthread_rwlock_init(&rwlp->rw_lock, NULL));
rwlp->rw_readers = 0;
rwlp->rw_owner = 0;
}
void
rw_destroy(krwlock_t *rwlp)
{
VERIFY0(pthread_rwlock_destroy(&rwlp->rw_lock));
}
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
if (rw == RW_READER) {
VERIFY0(pthread_rwlock_rdlock(&rwlp->rw_lock));
atomic_inc_uint(&rwlp->rw_readers);
} else {
VERIFY0(pthread_rwlock_wrlock(&rwlp->rw_lock));
rwlp->rw_owner = pthread_self();
}
}
void
rw_exit(krwlock_t *rwlp)
{
if (RW_READ_HELD(rwlp))
atomic_dec_uint(&rwlp->rw_readers);
else
rwlp->rw_owner = 0;
VERIFY0(pthread_rwlock_unlock(&rwlp->rw_lock));
}
int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
int error;
if (rw == RW_READER)
error = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
else
error = pthread_rwlock_trywrlock(&rwlp->rw_lock);
if (error == 0) {
if (rw == RW_READER)
atomic_inc_uint(&rwlp->rw_readers);
else
rwlp->rw_owner = pthread_self();
return (1);
}
VERIFY3S(error, ==, EBUSY);
return (0);
}
/* ARGSUSED */
uint32_t
zone_get_hostid(void *zonep)
{
/*
* We're emulating the system's hostid in userland.
*/
return (strtoul(hw_serial, NULL, 10));
}
int
rw_tryupgrade(krwlock_t *rwlp)
{
return (0);
}
/*
* =========================================================================
* condition variables
* =========================================================================
*/
void
cv_init(kcondvar_t *cv, char *name, int type, void *arg)
{
VERIFY0(pthread_cond_init(cv, NULL));
}
void
cv_destroy(kcondvar_t *cv)
{
VERIFY0(pthread_cond_destroy(cv));
}
void
cv_wait(kcondvar_t *cv, kmutex_t *mp)
{
memset(&mp->m_owner, 0, sizeof (pthread_t));
VERIFY0(pthread_cond_wait(cv, &mp->m_lock));
mp->m_owner = pthread_self();
}
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mp)
{
cv_wait(cv, mp);
return (1);
}
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
{
int error;
struct timeval tv;
struct timespec ts;
clock_t delta;
delta = abstime - ddi_get_lbolt();
if (delta <= 0)
return (-1);
VERIFY(gettimeofday(&tv, NULL) == 0);
ts.tv_sec = tv.tv_sec + delta / hz;
ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % hz) * (NANOSEC / hz);
if (ts.tv_nsec >= NANOSEC) {
ts.tv_sec++;
ts.tv_nsec -= NANOSEC;
}
memset(&mp->m_owner, 0, sizeof (pthread_t));
error = pthread_cond_timedwait(cv, &mp->m_lock, &ts);
mp->m_owner = pthread_self();
if (error == ETIMEDOUT)
return (-1);
VERIFY0(error);
return (1);
}
/*ARGSUSED*/
int
cv_timedwait_hires(kcondvar_t *cv, kmutex_t *mp, hrtime_t tim, hrtime_t res,
int flag)
{
int error;
struct timeval tv;
struct timespec ts;
hrtime_t delta;
ASSERT(flag == 0 || flag == CALLOUT_FLAG_ABSOLUTE);
delta = tim;
if (flag & CALLOUT_FLAG_ABSOLUTE)
delta -= gethrtime();
if (delta <= 0)
return (-1);
VERIFY0(gettimeofday(&tv, NULL));
ts.tv_sec = tv.tv_sec + delta / NANOSEC;
ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % NANOSEC);
if (ts.tv_nsec >= NANOSEC) {
ts.tv_sec++;
ts.tv_nsec -= NANOSEC;
}
memset(&mp->m_owner, 0, sizeof (pthread_t));
error = pthread_cond_timedwait(cv, &mp->m_lock, &ts);
mp->m_owner = pthread_self();
if (error == ETIMEDOUT)
return (-1);
VERIFY0(error);
return (1);
}
void
cv_signal(kcondvar_t *cv)
{
VERIFY0(pthread_cond_signal(cv));
}
void
cv_broadcast(kcondvar_t *cv)
{
VERIFY0(pthread_cond_broadcast(cv));
}
/*
* =========================================================================
* procfs list
* =========================================================================
*/
void
seq_printf(struct seq_file *m, const char *fmt, ...)
{}
void
procfs_list_install(const char *module,
const char *submodule,
const char *name,
mode_t mode,
procfs_list_t *procfs_list,
int (*show)(struct seq_file *f, void *p),
int (*show_header)(struct seq_file *f),
int (*clear)(procfs_list_t *procfs_list),
size_t procfs_list_node_off)
{
mutex_init(&procfs_list->pl_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&procfs_list->pl_list,
procfs_list_node_off + sizeof (procfs_list_node_t),
procfs_list_node_off + offsetof(procfs_list_node_t, pln_link));
procfs_list->pl_next_id = 1;
procfs_list->pl_node_offset = procfs_list_node_off;
}
void
procfs_list_uninstall(procfs_list_t *procfs_list)
{}
void
procfs_list_destroy(procfs_list_t *procfs_list)
{
ASSERT(list_is_empty(&procfs_list->pl_list));
list_destroy(&procfs_list->pl_list);
mutex_destroy(&procfs_list->pl_lock);
}
#define NODE_ID(procfs_list, obj) \
(((procfs_list_node_t *)(((char *)obj) + \
(procfs_list)->pl_node_offset))->pln_id)
void
procfs_list_add(procfs_list_t *procfs_list, void *p)
{
ASSERT(MUTEX_HELD(&procfs_list->pl_lock));
NODE_ID(procfs_list, p) = procfs_list->pl_next_id++;
list_insert_tail(&procfs_list->pl_list, p);
}
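/*
 * Editor's illustrative sketch (not part of this change): a consumer
 * embeds procfs_list_node_t in its own record so that NODE_ID() above
 * can recover the id from the configured node offset.  example_entry_t
 * and its fields are hypothetical; the list is assumed to have been
 * installed with offsetof(example_entry_t, ee_node) as
 * procfs_list_node_off.
 */
typedef struct example_entry {
	procfs_list_node_t	ee_node;
	uint64_t		ee_value;
} example_entry_t;

static void
example_entry_add(procfs_list_t *pl, example_entry_t *ee)
{
	mutex_enter(&pl->pl_lock);
	procfs_list_add(pl, ee);	/* assigns ee->ee_node.pln_id */
	mutex_exit(&pl->pl_lock);
}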
/*
* =========================================================================
* vnode operations
* =========================================================================
*/
/*
* =========================================================================
* Figure out which debugging statements to print
* =========================================================================
*/
static char *dprintf_string;
static int dprintf_print_all;
int
dprintf_find_string(const char *string)
{
char *tmp_str = dprintf_string;
int len = strlen(string);
/*
* Find out if this is a string we want to print.
* String format: file1.c,function_name1,file2.c,file3.c
*/
while (tmp_str != NULL) {
if (strncmp(tmp_str, string, len) == 0 &&
(tmp_str[len] == ',' || tmp_str[len] == '\0'))
return (1);
tmp_str = strchr(tmp_str, ',');
if (tmp_str != NULL)
tmp_str++; /* Get rid of , */
}
return (0);
}
void
dprintf_setup(int *argc, char **argv)
{
int i, j;
/*
* Debugging can be specified two ways: by setting the
* environment variable ZFS_DEBUG, or by including a
* "debug=..." argument on the command line. The command
* line setting overrides the environment variable.
*/
for (i = 1; i < *argc; i++) {
int len = strlen("debug=");
/* First look for a command line argument */
if (strncmp("debug=", argv[i], len) == 0) {
dprintf_string = argv[i] + len;
/* Remove from args */
for (j = i; j < *argc; j++)
argv[j] = argv[j+1];
argv[j] = NULL;
(*argc)--;
}
}
if (dprintf_string == NULL) {
/* Look for ZFS_DEBUG environment variable */
dprintf_string = getenv("ZFS_DEBUG");
}
/*
* Are we just turning on all debugging?
*/
if (dprintf_find_string("on"))
dprintf_print_all = 1;
if (dprintf_string != NULL)
zfs_flags |= ZFS_DEBUG_DPRINTF;
}
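/*
 * Editor's illustrative sketch (not part of this change): with
 * ZFS_DEBUG="dbuf.c,spa_sync" (or a "debug=dbuf.c,spa_sync" argument),
 * dprintf_find_string() matches whole comma-separated tokens.  The
 * token values below are arbitrary examples.
 */
static void
example_dprintf_matching(void)
{
	ASSERT3S(dprintf_find_string("dbuf.c"), ==, 1);		/* 1st token */
	ASSERT3S(dprintf_find_string("spa_sync"), ==, 1);	/* 2nd token */
	ASSERT3S(dprintf_find_string("spa.c"), ==, 0);		/* no such token */
}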
/*
* =========================================================================
* debug printfs
* =========================================================================
*/
void
__dprintf(boolean_t dprint, const char *file, const char *func,
int line, const char *fmt, ...)
{
/* Get rid of annoying "../common/" prefix to filename. */
const char *newfile = zfs_basename(file);
va_list adx;
if (dprint) {
/* dprintf messages are printed immediately */
if (!dprintf_print_all &&
!dprintf_find_string(newfile) &&
!dprintf_find_string(func))
return;
/* Print out just the function name if requested */
flockfile(stdout);
if (dprintf_find_string("pid"))
(void) printf("%d ", getpid());
if (dprintf_find_string("tid"))
(void) printf("%ju ",
(uintmax_t)(uintptr_t)pthread_self());
if (dprintf_find_string("cpu"))
(void) printf("%u ", getcpuid());
if (dprintf_find_string("time"))
(void) printf("%llu ", gethrtime());
if (dprintf_find_string("long"))
(void) printf("%s, line %d: ", newfile, line);
(void) printf("dprintf: %s: ", func);
va_start(adx, fmt);
(void) vprintf(fmt, adx);
va_end(adx);
funlockfile(stdout);
} else {
/* zfs_dbgmsg is logged for dumping later */
size_t size;
char *buf;
int i;
size = 1024;
buf = umem_alloc(size, UMEM_NOFAIL);
i = snprintf(buf, size, "%s:%d:%s(): ", newfile, line, func);
if (i < size) {
va_start(adx, fmt);
(void) vsnprintf(buf + i, size - i, fmt, adx);
va_end(adx);
}
__zfs_dbgmsg(buf);
umem_free(buf, size);
}
}
/*
* =========================================================================
* cmn_err() and panic()
* =========================================================================
*/
static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" };
static char ce_suffix[CE_IGNORE][2] = { "", "\n", "\n", "" };
void
vpanic(const char *fmt, va_list adx)
{
(void) fprintf(stderr, "error: ");
(void) vfprintf(stderr, fmt, adx);
(void) fprintf(stderr, "\n");
abort(); /* think of it as a "user-level crash dump" */
}
void
panic(const char *fmt, ...)
{
va_list adx;
va_start(adx, fmt);
vpanic(fmt, adx);
va_end(adx);
}
void
vcmn_err(int ce, const char *fmt, va_list adx)
{
if (ce == CE_PANIC)
vpanic(fmt, adx);
if (ce != CE_NOTE) { /* suppress noise in userland stress testing */
(void) fprintf(stderr, "%s", ce_prefix[ce]);
(void) vfprintf(stderr, fmt, adx);
(void) fprintf(stderr, "%s", ce_suffix[ce]);
}
}
-/*PRINTFLIKE2*/
void
cmn_err(int ce, const char *fmt, ...)
{
va_list adx;
va_start(adx, fmt);
vcmn_err(ce, fmt, adx);
va_end(adx);
}
/*
* =========================================================================
* misc routines
* =========================================================================
*/
void
delay(clock_t ticks)
{
(void) poll(0, 0, ticks * (1000 / hz));
}
/*
* Find highest one bit set.
* Returns bit number + 1 of highest bit that is set, otherwise returns 0.
* The __builtin_clzll() function is supported by both GCC and Clang.
*/
int
highbit64(uint64_t i)
{
if (i == 0)
return (0);
return (NBBY * sizeof (uint64_t) - __builtin_clzll(i));
}
/*
* Find lowest one bit set.
* Returns bit number + 1 of lowest bit that is set, otherwise returns 0.
* The __builtin_ffsll() function is supported by both GCC and Clang.
*/
int
lowbit64(uint64_t i)
{
if (i == 0)
return (0);
return (__builtin_ffsll(i));
}
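/*
 * Editor's illustrative sketch (not part of this change): worked values
 * for the two bit helpers above.
 */
static void
example_bit_helpers(void)
{
	ASSERT3S(highbit64(0), ==, 0);		/* no bit set */
	ASSERT3S(highbit64(1), ==, 1);		/* bit 0 -> 1 */
	ASSERT3S(highbit64(0x10), ==, 5);	/* bit 4 -> 5 */
	ASSERT3S(lowbit64(0x18), ==, 4);	/* lowest set bit is bit 3 -> 4 */
	ASSERT3S(lowbit64(0), ==, 0);		/* no bit set */
}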
const char *random_path = "/dev/random";
const char *urandom_path = "/dev/urandom";
static int random_fd = -1, urandom_fd = -1;
void
random_init(void)
{
VERIFY((random_fd = open(random_path, O_RDONLY | O_CLOEXEC)) != -1);
VERIFY((urandom_fd = open(urandom_path, O_RDONLY | O_CLOEXEC)) != -1);
}
void
random_fini(void)
{
close(random_fd);
close(urandom_fd);
random_fd = -1;
urandom_fd = -1;
}
static int
random_get_bytes_common(uint8_t *ptr, size_t len, int fd)
{
size_t resid = len;
ssize_t bytes;
ASSERT(fd != -1);
while (resid != 0) {
bytes = read(fd, ptr, resid);
ASSERT3S(bytes, >=, 0);
ptr += bytes;
resid -= bytes;
}
return (0);
}
int
random_get_bytes(uint8_t *ptr, size_t len)
{
return (random_get_bytes_common(ptr, len, random_fd));
}
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
return (random_get_bytes_common(ptr, len, urandom_fd));
}
int
ddi_strtoul(const char *hw_serial, char **nptr, int base, unsigned long *result)
{
char *end;
*result = strtoul(hw_serial, &end, base);
if (*result == 0)
return (errno);
return (0);
}
int
ddi_strtoull(const char *str, char **nptr, int base, u_longlong_t *result)
{
char *end;
*result = strtoull(str, &end, base);
if (*result == 0)
return (errno);
return (0);
}
utsname_t *
utsname(void)
{
return (&hw_utsname);
}
/*
* =========================================================================
* kernel emulation setup & teardown
* =========================================================================
*/
static int
umem_out_of_memory(void)
{
char errmsg[] = "out of memory -- generating core dump\n";
(void) fprintf(stderr, "%s", errmsg);
abort();
return (0);
}
void
kernel_init(int mode)
{
extern uint_t rrw_tsd_key;
umem_nofail_callback(umem_out_of_memory);
physmem = sysconf(_SC_PHYS_PAGES);
dprintf("physmem = %llu pages (%.2f GB)\n", (u_longlong_t)physmem,
(double)physmem * sysconf(_SC_PAGE_SIZE) / (1ULL << 30));
(void) snprintf(hw_serial, sizeof (hw_serial), "%ld",
(mode & SPA_MODE_WRITE) ? get_system_hostid() : 0);
random_init();
VERIFY0(uname(&hw_utsname));
system_taskq_init();
icp_init();
zstd_init();
spa_init((spa_mode_t)mode);
fletcher_4_init();
tsd_create(&rrw_tsd_key, rrw_tsd_destroy);
}
void
kernel_fini(void)
{
fletcher_4_fini();
spa_fini();
zstd_fini();
icp_fini();
system_taskq_fini();
random_fini();
}
uid_t
crgetuid(cred_t *cr)
{
return (0);
}
uid_t
crgetruid(cred_t *cr)
{
return (0);
}
gid_t
crgetgid(cred_t *cr)
{
return (0);
}
int
crgetngroups(cred_t *cr)
{
return (0);
}
gid_t *
crgetgroups(cred_t *cr)
{
return (NULL);
}
int
zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr)
{
return (0);
}
int
zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr)
{
return (0);
}
int
zfs_secpolicy_destroy_perms(const char *name, cred_t *cr)
{
return (0);
}
int
secpolicy_zfs(const cred_t *cr)
{
return (0);
}
int
secpolicy_zfs_proc(const cred_t *cr, proc_t *proc)
{
return (0);
}
ksiddomain_t *
ksid_lookupdomain(const char *dom)
{
ksiddomain_t *kd;
kd = umem_zalloc(sizeof (ksiddomain_t), UMEM_NOFAIL);
kd->kd_name = spa_strdup(dom);
return (kd);
}
void
ksiddomain_rele(ksiddomain_t *ksid)
{
spa_strfree(ksid->kd_name);
umem_free(ksid, sizeof (ksiddomain_t));
}
char *
kmem_vasprintf(const char *fmt, va_list adx)
{
char *buf = NULL;
va_list adx_copy;
va_copy(adx_copy, adx);
VERIFY(vasprintf(&buf, fmt, adx_copy) != -1);
va_end(adx_copy);
return (buf);
}
char *
kmem_asprintf(const char *fmt, ...)
{
char *buf = NULL;
va_list adx;
va_start(adx, fmt);
VERIFY(vasprintf(&buf, fmt, adx) != -1);
va_end(adx);
return (buf);
}
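/*
 * Editor's illustrative sketch (not part of this change):
 * kmem_asprintf() returns a heap-allocated string, conventionally
 * released with kmem_strfree().  example_label_name() is a
 * hypothetical caller.
 */
static char *
example_label_name(uint64_t id)
{
	char *name = kmem_asprintf("label-%llu", (u_longlong_t)id);
	/* the caller is expected to kmem_strfree(name) when done */
	return (name);
}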
/* ARGSUSED */
zfs_file_t *
zfs_onexit_fd_hold(int fd, minor_t *minorp)
{
*minorp = 0;
return (NULL);
}
/* ARGSUSED */
void
zfs_onexit_fd_rele(zfs_file_t *fp)
{
}
/* ARGSUSED */
int
zfs_onexit_add_cb(minor_t minor, void (*func)(void *), void *data,
uint64_t *action_handle)
{
return (0);
}
fstrans_cookie_t
spl_fstrans_mark(void)
{
return ((fstrans_cookie_t)0);
}
void
spl_fstrans_unmark(fstrans_cookie_t cookie)
{
}
int
__spl_pf_fstrans_check(void)
{
return (0);
}
int
kmem_cache_reap_active(void)
{
return (0);
}
void *zvol_tag = "zvol_tag";
void
zvol_create_minor(const char *name)
{
}
void
zvol_create_minors_recursive(const char *name)
{
}
void
zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
{
}
void
zvol_rename_minors(spa_t *spa, const char *oldname, const char *newname,
boolean_t async)
{
}
/*
* Open file
*
* path - fully qualified path to file
* flags - open(2) flags, e.g. O_RDONLY / O_WRONLY / O_CREAT / O_EXCL
* fpp - pointer to return file pointer
*
* Returns 0 on success, underlying error on failure.
*/
int
zfs_file_open(const char *path, int flags, int mode, zfs_file_t **fpp)
{
int fd = -1;
int dump_fd = -1;
int err;
int old_umask = 0;
zfs_file_t *fp;
struct stat64 st;
if (!(flags & O_CREAT) && stat64(path, &st) == -1)
return (errno);
if (!(flags & O_CREAT) && S_ISBLK(st.st_mode))
flags |= O_DIRECT;
if (flags & O_CREAT)
old_umask = umask(0);
fd = open64(path, flags, mode);
if (fd == -1)
return (errno);
if (flags & O_CREAT)
(void) umask(old_umask);
if (vn_dumpdir != NULL) {
char *dumppath = umem_zalloc(MAXPATHLEN, UMEM_NOFAIL);
const char *inpath = zfs_basename(path);
(void) snprintf(dumppath, MAXPATHLEN,
"%s/%s", vn_dumpdir, inpath);
dump_fd = open64(dumppath, O_CREAT | O_WRONLY, 0666);
umem_free(dumppath, MAXPATHLEN);
if (dump_fd == -1) {
err = errno;
close(fd);
return (err);
}
} else {
dump_fd = -1;
}
(void) fcntl(fd, F_SETFD, FD_CLOEXEC);
fp = umem_zalloc(sizeof (zfs_file_t), UMEM_NOFAIL);
fp->f_fd = fd;
fp->f_dump_fd = dump_fd;
*fpp = fp;
return (0);
}
void
zfs_file_close(zfs_file_t *fp)
{
close(fp->f_fd);
if (fp->f_dump_fd != -1)
close(fp->f_dump_fd);
umem_free(fp, sizeof (zfs_file_t));
}
/*
* Stateful write - use os internal file pointer to determine where to
* write and update on successful completion.
*
* fp - pointer to file (pipe, socket, etc) to write to
* buf - buffer to write
* count - # of bytes to write
* resid - pointer to count of unwritten bytes (if short write)
*
* Returns 0 on success, errno on failure.
*/
int
zfs_file_write(zfs_file_t *fp, const void *buf, size_t count, ssize_t *resid)
{
ssize_t rc;
rc = write(fp->f_fd, buf, count);
if (rc < 0)
return (errno);
if (resid) {
*resid = count - rc;
} else if (rc != count) {
return (EIO);
}
return (0);
}
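/*
 * Editor's illustrative sketch (not part of this change): a minimal
 * open/write/close sequence built on the helpers above.  The path and
 * payload are hypothetical.
 */
static int
example_file_write(const char *path)
{
	zfs_file_t *fp;
	const char buf[] = "example payload";
	int err;

	err = zfs_file_open(path, O_WRONLY | O_CREAT, 0644, &fp);
	if (err != 0)
		return (err);
	/* NULL resid: a short write is reported as EIO */
	err = zfs_file_write(fp, buf, sizeof (buf), NULL);
	zfs_file_close(fp);
	return (err);
}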
/*
* Stateless write - os internal file pointer is not updated.
*
* fp - pointer to file (pipe, socket, etc) to write to
* buf - buffer to write
* count - # of bytes to write
* off - file offset to write to (only valid for seekable types)
* resid - pointer to count of unwritten bytes
*
* Returns 0 on success, errno on failure.
*/
int
zfs_file_pwrite(zfs_file_t *fp, const void *buf,
size_t count, loff_t pos, ssize_t *resid)
{
ssize_t rc, split, done;
int sectors;
/*
* To simulate partial disk writes, we split writes into two
* system calls so that the process can be killed in between.
* This is used by ztest to simulate realistic failure modes.
*/
sectors = count >> SPA_MINBLOCKSHIFT;
split = (sectors > 0 ? rand() % sectors : 0) << SPA_MINBLOCKSHIFT;
rc = pwrite64(fp->f_fd, buf, split, pos);
if (rc != -1) {
done = rc;
rc = pwrite64(fp->f_fd, (char *)buf + split,
count - split, pos + split);
}
#ifdef __linux__
if (rc == -1 && errno == EINVAL) {
/*
* Under Linux, this most likely means an alignment issue
* (memory or disk) due to O_DIRECT, so we abort() in order
* to catch the offender.
*/
abort();
}
#endif
if (rc < 0)
return (errno);
done += rc;
if (resid) {
*resid = count - done;
} else if (done != count) {
return (EIO);
}
return (0);
}
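/*
 * Editor's worked example (not part of this change): for count ==
 * 131072 and SPA_MINBLOCKSHIFT == 9, sectors == 256 and split is a
 * random multiple of 512 in [0, 130560], so the request is issued as
 * two pwrite64() calls of split and count - split bytes.
 */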
/*
* Stateful read - use os internal file pointer to determine where to
* read and update on successful completion.
*
* fp - pointer to file (pipe, socket, etc) to read from
* buf - buffer to read into
* count - # of bytes to read
* resid - pointer to count of unread bytes (if short read)
*
* Returns 0 on success, errno on failure.
*/
int
zfs_file_read(zfs_file_t *fp, void *buf, size_t count, ssize_t *resid)
{
int rc;
rc = read(fp->f_fd, buf, count);
if (rc < 0)
return (errno);
if (resid) {
*resid = count - rc;
} else if (rc != count) {
return (EIO);
}
return (0);
}
/*
* Stateless read - os internal file pointer is not updated.
*
* fp - pointer to file (pipe, socket, etc) to read from
* buf - buffer to read into
* count - # of bytes to read
* off - file offset to read from (only valid for seekable types)
* resid - pointer to count of unread bytes (if short read)
*
* Returns 0 on success, errno on failure.
*/
int
zfs_file_pread(zfs_file_t *fp, void *buf, size_t count, loff_t off,
ssize_t *resid)
{
ssize_t rc;
rc = pread64(fp->f_fd, buf, count, off);
if (rc < 0) {
#ifdef __linux__
/*
* Under Linux, this most likely means an alignment issue
* (memory or disk) due to O_DIRECT, so we abort() in order to
* catch the offender.
*/
if (errno == EINVAL)
abort();
#endif
return (errno);
}
if (fp->f_dump_fd != -1) {
int status;
status = pwrite64(fp->f_dump_fd, buf, rc, off);
ASSERT(status != -1);
}
if (resid) {
*resid = count - rc;
} else if (rc != count) {
return (EIO);
}
return (0);
}
/*
* lseek - set / get file pointer
*
* fp - pointer to file (pipe, socket, etc) to read from
* offp - value to seek to, returns current value plus passed offset
* whence - see man pages for standard lseek whence values
*
* Returns 0 on success, errno on failure (ESPIPE for non-seekable types)
*/
int
zfs_file_seek(zfs_file_t *fp, loff_t *offp, int whence)
{
loff_t rc;
rc = lseek(fp->f_fd, *offp, whence);
if (rc < 0)
return (errno);
*offp = rc;
return (0);
}
/*
* Get file attributes
*
* filp - file pointer
* zfattr - pointer to file attr structure
*
* Currently only used for fetching size and file mode
*
* Returns 0 on success or error code of underlying getattr call on failure.
*/
int
zfs_file_getattr(zfs_file_t *fp, zfs_file_attr_t *zfattr)
{
struct stat64 st;
if (fstat64_blk(fp->f_fd, &st) == -1)
return (errno);
zfattr->zfa_size = st.st_size;
zfattr->zfa_mode = st.st_mode;
return (0);
}
/*
* Sync file to disk
*
* filp - file pointer
* flags - O_SYNC and or O_DSYNC
*
* Returns 0 on success or error code of underlying sync call on failure.
*/
int
zfs_file_fsync(zfs_file_t *fp, int flags)
{
int rc;
rc = fsync(fp->f_fd);
if (rc < 0)
return (errno);
return (0);
}
/*
* fallocate - allocate or free space on disk
*
* fp - file pointer
* mode (non-standard options for hole punching etc)
* offset - offset to start allocating or freeing from
* len - length to free / allocate
*
* OPTIONAL
*/
int
zfs_file_fallocate(zfs_file_t *fp, int mode, loff_t offset, loff_t len)
{
#ifdef __linux__
return (fallocate(fp->f_fd, mode, offset, len));
#else
return (EOPNOTSUPP);
#endif
}
/*
* Request current file pointer offset
*
* fp - pointer to file
*
* Returns current file offset.
*/
loff_t
zfs_file_off(zfs_file_t *fp)
{
return (lseek(fp->f_fd, 0, SEEK_CUR));
}
/*
* unlink file
*
* path - fully qualified file path
*
* Returns 0 on success.
*
* OPTIONAL
*/
int
zfs_file_unlink(const char *path)
{
return (remove(path));
}
/*
* Get reference to file pointer
*
* fd - input file descriptor
*
* Returns pointer to file struct or NULL.
* Unsupported in user space.
*/
zfs_file_t *
zfs_file_get(int fd)
{
abort();
return (NULL);
}
/*
* Drop reference to file pointer
*
* fp - pointer to file struct
*
* Unsupported in user space.
*/
void
zfs_file_put(zfs_file_t *fp)
{
abort();
}
void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
}
diff --git a/sys/contrib/openzfs/lib/libzutil/zutil_import.c b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
index b5b2d7dbed91..95fd0ec0af85 100644
--- a/sys/contrib/openzfs/lib/libzutil/zutil_import.c
+++ b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
@@ -1,1861 +1,1860 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright 2015 RackTop Systems.
* Copyright (c) 2016, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
/*
* Pool import support functions.
*
* Used by zpool, ztest, zdb, and zhack to locate importable configs. Since
* these commands are expected to run in the global zone, we can assume
* that the devices are all readable when called.
*
* To import a pool, we rely on reading the configuration information from the
* ZFS label of each device. If we successfully read the label, then we
* organize the configuration information in the following hierarchy:
*
* pool guid -> toplevel vdev guid -> label txg
*
* Duplicate entries matching this same tuple will be discarded. Once we have
* examined every device, we pick the best label txg config for each toplevel
* vdev. We then arrange these toplevel vdevs into a complete pool config, and
* update any paths that have changed. Finally, we attempt to import the pool
* using our derived config, and record the results.
*/
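/*
 * Editor's worked example (not part of this change): two labels read
 * from different paths to the same disk both land under one tuple,
 * e.g.
 *
 *	pool guid 0xd0e1 -> top-level vdev guid 0x9abc -> label txg 402
 *
 * so the duplicate is discarded and a single best config per top-level
 * vdev survives into the assembled pool config.  The guid/txg values
 * above are arbitrary.
 */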
#include <aio.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <libgen.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/dktp/fdisk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <thread_pool.h>
#include <libzutil.h>
#include <libnvpair.h>
#include "zutil_import.h"
-/*PRINTFLIKE2*/
-static void
+static __attribute__((format(printf, 2, 3))) void
zutil_error_aux(libpc_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) vsnprintf(hdl->lpc_desc, sizeof (hdl->lpc_desc), fmt, ap);
hdl->lpc_desc_active = B_TRUE;
va_end(ap);
}
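/*
 * Editor's note (not part of this change): unlike the removed
 * PRINTFLIKE2 lint annotation, the format attribute above lets
 * GCC/Clang -Wformat flag mismatched calls, e.g.
 *
 *	zutil_error_aux(hdl, "bad txg %llu", (int)txg);
 */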
static void
zutil_verror(libpc_handle_t *hdl, const char *error, const char *fmt,
va_list ap)
{
char action[1024];
(void) vsnprintf(action, sizeof (action), fmt, ap);
if (hdl->lpc_desc_active)
hdl->lpc_desc_active = B_FALSE;
else
hdl->lpc_desc[0] = '\0';
if (hdl->lpc_printerr) {
if (hdl->lpc_desc[0] != '\0')
error = hdl->lpc_desc;
(void) fprintf(stderr, "%s: %s\n", action, error);
}
}
-/*PRINTFLIKE3*/
-static int
+static __attribute__((format(printf, 3, 4))) int
zutil_error_fmt(libpc_handle_t *hdl, const char *error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
zutil_verror(hdl, error, fmt, ap);
va_end(ap);
return (-1);
}
static int
zutil_error(libpc_handle_t *hdl, const char *error, const char *msg)
{
return (zutil_error_fmt(hdl, error, "%s", msg));
}
static int
zutil_no_memory(libpc_handle_t *hdl)
{
zutil_error(hdl, EZFS_NOMEM, "internal error");
exit(1);
}
void *
zutil_alloc(libpc_handle_t *hdl, size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
(void) zutil_no_memory(hdl);
return (data);
}
char *
zutil_strdup(libpc_handle_t *hdl, const char *str)
{
char *ret;
if ((ret = strdup(str)) == NULL)
(void) zutil_no_memory(hdl);
return (ret);
}
static char *
zutil_strndup(libpc_handle_t *hdl, const char *str, size_t n)
{
char *ret;
if ((ret = strndup(str, n)) == NULL)
(void) zutil_no_memory(hdl);
return (ret);
}
/*
* Intermediate structures used to gather configuration information.
*/
typedef struct config_entry {
uint64_t ce_txg;
nvlist_t *ce_config;
struct config_entry *ce_next;
} config_entry_t;
typedef struct vdev_entry {
uint64_t ve_guid;
config_entry_t *ve_configs;
struct vdev_entry *ve_next;
} vdev_entry_t;
typedef struct pool_entry {
uint64_t pe_guid;
vdev_entry_t *pe_vdevs;
struct pool_entry *pe_next;
} pool_entry_t;
typedef struct name_entry {
char *ne_name;
uint64_t ne_guid;
uint64_t ne_order;
uint64_t ne_num_labels;
struct name_entry *ne_next;
} name_entry_t;
typedef struct pool_list {
pool_entry_t *pools;
name_entry_t *names;
} pool_list_t;
/*
* Go through and fix up any path and/or devid information for the given vdev
* configuration.
*/
static int
fix_paths(libpc_handle_t *hdl, nvlist_t *nv, name_entry_t *names)
{
nvlist_t **child;
uint_t c, children;
uint64_t guid;
name_entry_t *ne, *best;
char *path;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (fix_paths(hdl, child[c], names) != 0)
return (-1);
return (0);
}
/*
* This is a leaf (file or disk) vdev. In either case, go through
* the name list and see if we find a matching guid. If so, replace
* the path and see if we can calculate a new devid.
*
* There may be multiple names associated with a particular guid, in
* which case we have overlapping partitions or multiple paths to the
* same disk. In this case we prefer to use the path name which
* matches the ZPOOL_CONFIG_PATH. If no matching entry is found we
* use the lowest order device which corresponds to the first match
* while traversing the ZPOOL_IMPORT_PATH search path.
*/
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
path = NULL;
best = NULL;
for (ne = names; ne != NULL; ne = ne->ne_next) {
if (ne->ne_guid == guid) {
if (path == NULL) {
best = ne;
break;
}
if ((strlen(path) == strlen(ne->ne_name)) &&
strncmp(path, ne->ne_name, strlen(path)) == 0) {
best = ne;
break;
}
if (best == NULL) {
best = ne;
continue;
}
/* Prefer paths with more vdev labels. */
if (ne->ne_num_labels > best->ne_num_labels) {
best = ne;
continue;
}
/* Prefer paths earlier in the search order. */
if (ne->ne_num_labels == best->ne_num_labels &&
ne->ne_order < best->ne_order) {
best = ne;
continue;
}
}
}
if (best == NULL)
return (0);
if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
return (-1);
update_vdev_config_dev_strs(nv);
return (0);
}
/*
* Add the given configuration to the list of known devices.
*/
static int
add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path,
int order, int num_labels, nvlist_t *config)
{
uint64_t pool_guid, vdev_guid, top_guid, txg, state;
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
name_entry_t *ne;
/*
* If this is a hot spare not currently in use or level 2 cache
* device, add it to the list of names to translate, but don't do
* anything else.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&state) == 0 &&
(state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) {
free(ne);
return (-1);
}
ne->ne_guid = vdev_guid;
ne->ne_order = order;
ne->ne_num_labels = num_labels;
ne->ne_next = pl->names;
pl->names = ne;
return (0);
}
/*
* If we have a valid config but cannot read any of these fields, then
* it means we have a half-initialized label. In vdev_label_init()
* we write a label with txg == 0 so that we can identify the device
* in case the user refers to the same disk later on. If we fail to
* create the pool, we'll be left with a label in this state
* which should not be considered part of a valid pool.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
&vdev_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
&top_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0) {
return (0);
}
/*
* First, see if we know about this pool. If not, then add it to the
* list of known pools.
*/
for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
if (pe->pe_guid == pool_guid)
break;
}
if (pe == NULL) {
if ((pe = zutil_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
return (-1);
}
pe->pe_guid = pool_guid;
pe->pe_next = pl->pools;
pl->pools = pe;
}
/*
* Second, see if we know about this toplevel vdev. Add it if it's
* missing.
*/
for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
if (ve->ve_guid == top_guid)
break;
}
if (ve == NULL) {
if ((ve = zutil_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
return (-1);
}
ve->ve_guid = top_guid;
ve->ve_next = pe->pe_vdevs;
pe->pe_vdevs = ve;
}
/*
* Third, see if we have a config with a matching transaction group. If
* so, then we do nothing. Otherwise, add it to the list of known
* configs.
*/
for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
if (ce->ce_txg == txg)
break;
}
if (ce == NULL) {
if ((ce = zutil_alloc(hdl, sizeof (config_entry_t))) == NULL) {
return (-1);
}
ce->ce_txg = txg;
ce->ce_config = fnvlist_dup(config);
ce->ce_next = ve->ve_configs;
ve->ve_configs = ce;
}
/*
* At this point we've successfully added our config to the list of
* known configs. The last thing to do is add the vdev guid -> path
* mappings so that we can fix up the configuration as necessary before
* doing the import.
*/
if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) {
free(ne);
return (-1);
}
ne->ne_guid = vdev_guid;
ne->ne_order = order;
ne->ne_num_labels = num_labels;
ne->ne_next = pl->names;
pl->names = ne;
return (0);
}
static int
zutil_pool_active(libpc_handle_t *hdl, const char *name, uint64_t guid,
boolean_t *isactive)
{
ASSERT(hdl->lpc_ops->pco_pool_active != NULL);
int error = hdl->lpc_ops->pco_pool_active(hdl->lpc_lib_handle, name,
guid, isactive);
return (error);
}
static nvlist_t *
zutil_refresh_config(libpc_handle_t *hdl, nvlist_t *tryconfig)
{
ASSERT(hdl->lpc_ops->pco_refresh_config != NULL);
return (hdl->lpc_ops->pco_refresh_config(hdl->lpc_lib_handle,
tryconfig));
}
/*
* Determine if the vdev id is a hole in the namespace.
*/
static boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
int c;
for (c = 0; c < holes; c++) {
/* Top-level is a hole */
if (hole_array[c] == id)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Convert our list of pools into the definitive set of configurations. We
* start by picking the best config for each toplevel vdev. Once that's done,
* we assemble the toplevel vdevs into a full config for the pool. We make a
* pass to fix up any incorrect paths, and then add it to the main list to
* return to the user.
*/
static nvlist_t *
get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
nvlist_t *policy)
{
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
nvlist_t **spares, **l2cache;
uint_t i, nspares, nl2cache;
boolean_t config_seen;
uint64_t best_txg;
char *name, *hostname = NULL;
uint64_t guid;
uint_t children = 0;
nvlist_t **child = NULL;
uint_t holes;
uint64_t *hole_array, max_id;
uint_t c;
boolean_t isactive;
uint64_t hostid;
nvlist_t *nvl;
boolean_t valid_top_config = B_FALSE;
if (nvlist_alloc(&ret, 0, 0) != 0)
goto nomem;
for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
uint64_t id, max_txg = 0;
if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
goto nomem;
config_seen = B_FALSE;
/*
* Iterate over all toplevel vdevs. Grab the pool configuration
* from the first one we find, and then go through the rest and
* add them as necessary to the 'vdevs' member of the config.
*/
for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
/*
* Determine the best configuration for this vdev by
* selecting the config with the latest transaction
* group.
*/
best_txg = 0;
for (ce = ve->ve_configs; ce != NULL;
ce = ce->ce_next) {
if (ce->ce_txg > best_txg) {
tmp = ce->ce_config;
best_txg = ce->ce_txg;
}
}
/*
* We rely on the fact that the max txg for the
* pool will contain the most up-to-date information
* about the valid top-levels in the vdev namespace.
*/
if (best_txg > max_txg) {
(void) nvlist_remove(config,
ZPOOL_CONFIG_VDEV_CHILDREN,
DATA_TYPE_UINT64);
(void) nvlist_remove(config,
ZPOOL_CONFIG_HOLE_ARRAY,
DATA_TYPE_UINT64_ARRAY);
max_txg = best_txg;
hole_array = NULL;
holes = 0;
max_id = 0;
valid_top_config = B_FALSE;
if (nvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
verify(nvlist_add_uint64(config,
ZPOOL_CONFIG_VDEV_CHILDREN,
max_id) == 0);
valid_top_config = B_TRUE;
}
if (nvlist_lookup_uint64_array(tmp,
ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
&holes) == 0) {
verify(nvlist_add_uint64_array(config,
ZPOOL_CONFIG_HOLE_ARRAY,
hole_array, holes) == 0);
}
}
if (!config_seen) {
/*
* Copy the relevant pieces of data to the pool
* configuration:
*
* version
* pool guid
* name
* comment (if available)
* compatibility features (if available)
* pool state
* hostid (if available)
* hostname (if available)
*/
uint64_t state, version;
char *comment = NULL;
char *compatibility = NULL;
version = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_VERSION);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_VERSION, version);
guid = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_POOL_GUID);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_POOL_GUID, guid);
name = fnvlist_lookup_string(tmp,
ZPOOL_CONFIG_POOL_NAME);
fnvlist_add_string(config,
ZPOOL_CONFIG_POOL_NAME, name);
if (nvlist_lookup_string(tmp,
ZPOOL_CONFIG_COMMENT, &comment) == 0)
fnvlist_add_string(config,
ZPOOL_CONFIG_COMMENT, comment);
if (nvlist_lookup_string(tmp,
ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
fnvlist_add_string(config,
ZPOOL_CONFIG_COMPATIBILITY,
compatibility);
state = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_POOL_STATE);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_POOL_STATE, state);
hostid = 0;
if (nvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
fnvlist_add_uint64(config,
ZPOOL_CONFIG_HOSTID, hostid);
hostname = fnvlist_lookup_string(tmp,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(config,
ZPOOL_CONFIG_HOSTNAME, hostname);
}
config_seen = B_TRUE;
}
/*
* Add this top-level vdev to the child array.
*/
verify(nvlist_lookup_nvlist(tmp,
ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
&id) == 0);
if (id >= children) {
nvlist_t **newchild;
newchild = zutil_alloc(hdl, (id + 1) *
sizeof (nvlist_t *));
if (newchild == NULL)
goto nomem;
for (c = 0; c < children; c++)
newchild[c] = child[c];
free(child);
child = newchild;
children = id + 1;
}
if (nvlist_dup(nvtop, &child[id], 0) != 0)
goto nomem;
}
/*
* If we have information about all the top-levels then
* clean up the nvlist which we've constructed. This
* means removing any extraneous devices that are
* beyond the valid range or adding devices to the end
* of our array which appear to be missing.
*/
if (valid_top_config) {
if (max_id < children) {
for (c = max_id; c < children; c++)
nvlist_free(child[c]);
children = max_id;
} else if (max_id > children) {
nvlist_t **newchild;
newchild = zutil_alloc(hdl, (max_id) *
sizeof (nvlist_t *));
if (newchild == NULL)
goto nomem;
for (c = 0; c < children; c++)
newchild[c] = child[c];
free(child);
child = newchild;
children = max_id;
}
}
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
/*
* The vdev namespace may contain holes as a result of
* device removal. We must add them back into the vdev
* tree before we process any missing devices.
*/
if (holes > 0) {
ASSERT(valid_top_config);
for (c = 0; c < children; c++) {
nvlist_t *holey;
if (child[c] != NULL ||
!vdev_is_hole(hole_array, holes, c))
continue;
if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
0) != 0)
goto nomem;
/*
* Holes in the namespace are treated as
* "hole" top-level vdevs and have a
* special flag set on them.
*/
if (nvlist_add_string(holey,
ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE) != 0 ||
nvlist_add_uint64(holey,
ZPOOL_CONFIG_ID, c) != 0 ||
nvlist_add_uint64(holey,
ZPOOL_CONFIG_GUID, 0ULL) != 0) {
nvlist_free(holey);
goto nomem;
}
child[c] = holey;
}
}
/*
* Look for any missing top-level vdevs. If this is the case,
* create a faked up 'missing' vdev as a placeholder. We cannot
* simply compress the child array, because the kernel performs
* certain checks to make sure the vdev IDs match their location
* in the configuration.
*/
for (c = 0; c < children; c++) {
if (child[c] == NULL) {
nvlist_t *missing;
if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
0) != 0)
goto nomem;
if (nvlist_add_string(missing,
ZPOOL_CONFIG_TYPE,
VDEV_TYPE_MISSING) != 0 ||
nvlist_add_uint64(missing,
ZPOOL_CONFIG_ID, c) != 0 ||
nvlist_add_uint64(missing,
ZPOOL_CONFIG_GUID, 0ULL) != 0) {
nvlist_free(missing);
goto nomem;
}
child[c] = missing;
}
}
/*
* Put all of this pool's top-level vdevs into a root vdev.
*/
if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
goto nomem;
if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) != 0 ||
nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
child, children) != 0) {
nvlist_free(nvroot);
goto nomem;
}
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
children = 0;
child = NULL;
/*
* Go through and fix up any paths and/or devids based on our
* known list of vdev GUID -> path mappings.
*/
if (fix_paths(hdl, nvroot, pl->names) != 0) {
nvlist_free(nvroot);
goto nomem;
}
/*
* Add the root vdev to this pool's configuration.
*/
if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
nvroot) != 0) {
nvlist_free(nvroot);
goto nomem;
}
nvlist_free(nvroot);
/*
* zdb uses this path to report on active pools that were
* imported or created using -R.
*/
if (active_ok)
goto add_pool;
/*
* Determine if this pool is currently active, in which case we
* can't actually import it.
*/
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
if (zutil_pool_active(hdl, name, guid, &isactive) != 0)
goto error;
if (isactive) {
nvlist_free(config);
config = NULL;
continue;
}
if (policy != NULL) {
if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
policy) != 0)
goto nomem;
}
if ((nvl = zutil_refresh_config(hdl, config)) == NULL) {
nvlist_free(config);
config = NULL;
continue;
}
nvlist_free(config);
config = nvl;
/*
* Go through and update the paths for spares, now that we have
* them.
*/
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
for (i = 0; i < nspares; i++) {
if (fix_paths(hdl, spares[i], pl->names) != 0)
goto nomem;
}
}
/*
* Update the paths for l2cache devices.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
for (i = 0; i < nl2cache; i++) {
if (fix_paths(hdl, l2cache[i], pl->names) != 0)
goto nomem;
}
}
/*
* Restore the original information read from the actual label.
*/
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
DATA_TYPE_UINT64);
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
DATA_TYPE_STRING);
if (hostid != 0) {
verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
hostid) == 0);
verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
hostname) == 0);
}
add_pool:
/*
* Add this pool to the list of configs.
*/
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
if (nvlist_add_nvlist(ret, name, config) != 0)
goto nomem;
nvlist_free(config);
config = NULL;
}
return (ret);
nomem:
(void) zutil_no_memory(hdl);
error:
nvlist_free(config);
nvlist_free(ret);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
return (NULL);
}
/*
* Return the offset of the given label.
*/
static uint64_t
label_offset(uint64_t size, int l)
{
ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
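/*
 * Editor's worked example (not part of this change): with VDEV_LABELS
 * == 4 and sizeof (vdev_label_t) == 256 KiB, labels 0 and 1 sit at
 * offsets 0 and 256 KiB from the start of the device, while labels 2
 * and 3 sit at size - 512 KiB and size - 256 KiB.
 */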
/*
* The same description as for zpool_read_label() below applies, except
* that here we read synchronously, without AIO; this path is used when
* an AIO call failed in a way that falling back to plain reads may avoid.
*/
static int
zpool_read_label_slow(int fd, nvlist_t **config, int *num_labels)
{
struct stat64 statbuf;
int l, count = 0;
vdev_phys_t *label;
nvlist_t *expected_config = NULL;
uint64_t expected_guid = 0, size;
int error;
*config = NULL;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
error = posix_memalign((void **)&label, PAGESIZE, sizeof (*label));
if (error)
return (-1);
for (l = 0; l < VDEV_LABELS; l++) {
uint64_t state, guid, txg;
off_t offset = label_offset(size, l) + VDEV_SKIP_SIZE;
if (pread64(fd, label, sizeof (vdev_phys_t),
offset) != sizeof (vdev_phys_t))
continue;
if (nvlist_unpack(label->vp_nvlist,
sizeof (label->vp_nvlist), config, 0) != 0)
continue;
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
&guid) != 0 || guid == 0) {
nvlist_free(*config);
continue;
}
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(*config);
continue;
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(*config);
continue;
}
if (expected_guid) {
if (expected_guid == guid)
count++;
nvlist_free(*config);
} else {
expected_config = *config;
expected_guid = guid;
count++;
}
}
if (num_labels != NULL)
*num_labels = count;
free(label);
*config = expected_config;
return (0);
}
/*
* Given a file descriptor, read the label information and return an nvlist
* describing the configuration, if there is one. The number of valid
* labels found will be returned in num_labels when non-NULL.
*/
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
struct stat64 statbuf;
struct aiocb aiocbs[VDEV_LABELS];
struct aiocb *aiocbps[VDEV_LABELS];
vdev_phys_t *labels;
nvlist_t *expected_config = NULL;
uint64_t expected_guid = 0, size;
int error, l, count = 0;
*config = NULL;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
error = posix_memalign((void **)&labels, PAGESIZE,
VDEV_LABELS * sizeof (*labels));
if (error)
return (-1);
memset(aiocbs, 0, sizeof (aiocbs));
for (l = 0; l < VDEV_LABELS; l++) {
off_t offset = label_offset(size, l) + VDEV_SKIP_SIZE;
aiocbs[l].aio_fildes = fd;
aiocbs[l].aio_offset = offset;
aiocbs[l].aio_buf = &labels[l];
aiocbs[l].aio_nbytes = sizeof (vdev_phys_t);
aiocbs[l].aio_lio_opcode = LIO_READ;
aiocbps[l] = &aiocbs[l];
}
if (lio_listio(LIO_WAIT, aiocbps, VDEV_LABELS, NULL) != 0) {
int saved_errno = errno;
boolean_t do_slow = B_FALSE;
error = -1;
if (errno == EAGAIN || errno == EINTR || errno == EIO) {
/*
* A portion of the requests may have been submitted.
* Clean them up.
*/
for (l = 0; l < VDEV_LABELS; l++) {
errno = 0;
switch (aio_error(&aiocbs[l])) {
case EINVAL:
break;
case EINPROGRESS:
// This shouldn't be possible to
// encounter; die if we do.
ASSERT(B_FALSE);
case EOPNOTSUPP:
case ENOSYS:
do_slow = B_TRUE;
+ /* FALLTHROUGH */
case 0:
default:
(void) aio_return(&aiocbs[l]);
}
}
}
if (do_slow) {
/*
* At least some of the I/O involved files that are unsafe
* for AIO. Let's try again, without AIO this time.
*/
error = zpool_read_label_slow(fd, config, num_labels);
saved_errno = errno;
}
free(labels);
errno = saved_errno;
return (error);
}
for (l = 0; l < VDEV_LABELS; l++) {
uint64_t state, guid, txg;
if (aio_return(&aiocbs[l]) != sizeof (vdev_phys_t))
continue;
if (nvlist_unpack(labels[l].vp_nvlist,
sizeof (labels[l].vp_nvlist), config, 0) != 0)
continue;
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
&guid) != 0 || guid == 0) {
nvlist_free(*config);
continue;
}
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(*config);
continue;
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(*config);
continue;
}
if (expected_guid) {
if (expected_guid == guid)
count++;
nvlist_free(*config);
} else {
expected_config = *config;
expected_guid = guid;
count++;
}
}
if (num_labels != NULL)
*num_labels = count;
free(labels);
*config = expected_config;
return (0);
}
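/*
* Illustrative usage sketch (the device path below is only a
* placeholder): a caller typically opens the device read-only, asks for
* both the label config and the count of valid copies, and frees the
* returned nvlist when done.
*
* nvlist_t *config = NULL;
* int num_labels = 0;
* int fd = open("/dev/sdX1", O_RDONLY | O_CLOEXEC);
* if (fd >= 0) {
*	if (zpool_read_label(fd, &config, &num_labels) == 0 &&
*	    config != NULL) {
*		... inspect config, e.g. ZPOOL_CONFIG_POOL_NAME ...
*		nvlist_free(config);
*	}
*	(void) close(fd);
* }
*/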
/*
* Sorted by full path and then vdev guid to allow for multiple entries with
* the same full path name. This is required because it's possible to
* have multiple block devices with labels that refer to the same
* ZPOOL_CONFIG_PATH yet have different vdev guids. In this case both
* entries need to be added to the cache. Scenarios where this can occur
* include overwritten pool labels, devices which are visible from multiple
* hosts, and multipath devices.
*/
int
slice_cache_compare(const void *arg1, const void *arg2)
{
const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
uint64_t guid1 = ((rdsk_node_t *)arg1)->rn_vdev_guid;
uint64_t guid2 = ((rdsk_node_t *)arg2)->rn_vdev_guid;
int rv;
rv = TREE_ISIGN(strcmp(nm1, nm2));
if (rv)
return (rv);
return (TREE_CMP(guid1, guid2));
}
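/*
* Illustrative example: two cache nodes may share a hypothetical
* rn_name such as "/dev/mapper/mpatha" while carrying different
* rn_vdev_guid values; they compare equal on the name and are then
* ordered by guid, so both views of the same ZPOOL_CONFIG_PATH keep
* distinct entries in the AVL tree instead of one replacing the other.
*/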
static int
label_paths_impl(libpc_handle_t *hdl, nvlist_t *nvroot, uint64_t pool_guid,
uint64_t vdev_guid, char **path, char **devid)
{
nvlist_t **child;
uint_t c, children;
uint64_t guid;
char *val;
int error;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
error = label_paths_impl(hdl, child[c],
pool_guid, vdev_guid, path, devid);
if (error)
return (error);
}
return (0);
}
if (nvroot == NULL)
return (0);
error = nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_GUID, &guid);
if ((error != 0) || (guid != vdev_guid))
return (0);
error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_PATH, &val);
if (error == 0)
*path = val;
error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_DEVID, &val);
if (error == 0)
*devid = val;
return (0);
}
/*
* Given a disk label, fetch the ZPOOL_CONFIG_PATH and ZPOOL_CONFIG_DEVID
* strings and return them via the path and devid arguments, respectively.
* The returned pointers are only valid as long as label remains valid.
*/
int
label_paths(libpc_handle_t *hdl, nvlist_t *label, char **path, char **devid)
{
nvlist_t *nvroot;
uint64_t pool_guid;
uint64_t vdev_guid;
*path = NULL;
*devid = NULL;
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &vdev_guid))
return (ENOENT);
return (label_paths_impl(hdl, nvroot, pool_guid, vdev_guid, path,
devid));
}
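/*
* Illustrative usage sketch: given a label nvlist obtained from
* zpool_read_label(), the vdev's last known path and devid can be
* retrieved as below; both pointers reference memory owned by the
* label and must not be used after it is freed.
*
* char *path = NULL, *devid = NULL;
* if (label_paths(hdl, label, &path, &devid) == 0 && path != NULL)
*	... the label's ZPOOL_CONFIG_PATH is now in path ...
*/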
static void
zpool_find_import_scan_add_slice(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *path, const char *name, int order)
{
avl_index_t where;
rdsk_node_t *slice;
slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
if (asprintf(&slice->rn_name, "%s/%s", path, name) == -1) {
free(slice);
return;
}
slice->rn_vdev_guid = 0;
slice->rn_lock = lock;
slice->rn_avl = cache;
slice->rn_hdl = hdl;
slice->rn_order = order + IMPORT_ORDER_SCAN_OFFSET;
slice->rn_labelpaths = B_FALSE;
pthread_mutex_lock(lock);
if (avl_find(cache, slice, &where)) {
free(slice->rn_name);
free(slice);
} else {
avl_insert(cache, slice, where);
}
pthread_mutex_unlock(lock);
}
static int
zpool_find_import_scan_dir(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *dir, int order)
{
int error;
char path[MAXPATHLEN];
struct dirent64 *dp;
DIR *dirp;
if (realpath(dir, path) == NULL) {
error = errno;
if (error == ENOENT)
return (0);
- zutil_error_aux(hdl, strerror(error));
+ zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext(
TEXT_DOMAIN, "cannot resolve path '%s'"), dir);
return (error);
}
dirp = opendir(path);
if (dirp == NULL) {
error = errno;
- zutil_error_aux(hdl, strerror(error));
+ zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);
return (error);
}
while ((dp = readdir64(dirp)) != NULL) {
const char *name = dp->d_name;
if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0)
continue;
switch (dp->d_type) {
case DT_UNKNOWN:
case DT_BLK:
case DT_LNK:
#ifdef __FreeBSD__
case DT_CHR:
#endif
case DT_REG:
break;
default:
continue;
}
zpool_find_import_scan_add_slice(hdl, lock, cache, path, name,
order);
}
(void) closedir(dirp);
return (0);
}
static int
zpool_find_import_scan_path(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *dir, int order)
{
int error = 0;
char path[MAXPATHLEN];
char *d = NULL;
ssize_t dl;
const char *dpath, *name;
/*
* Separate the directory and the basename.
* We do this so that we can get the realpath of
* the directory. We don't get the realpath on the
* whole path because if it's a symlink, we want the
* path of the symlink not where it points to.
*/
name = zfs_basename(dir);
if ((dl = zfs_dirnamelen(dir)) == -1)
dpath = ".";
else
dpath = d = zutil_strndup(hdl, dir, dl);
if (realpath(dpath, path) == NULL) {
error = errno;
if (error == ENOENT) {
error = 0;
goto out;
}
- zutil_error_aux(hdl, strerror(error));
+ zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext(
TEXT_DOMAIN, "cannot resolve path '%s'"), dir);
goto out;
}
zpool_find_import_scan_add_slice(hdl, lock, cache, path, name, order);
out:
free(d);
return (error);
}
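/*
* Illustrative example (hypothetical path): for dir
* "/dev/disk/by-vdev/slot3" the split yields dpath "/dev/disk/by-vdev"
* and name "slot3"; realpath() is applied to dpath only, so if "slot3"
* is itself a symlink the cache keeps the symlink name rather than the
* device it points to.
*/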
/*
* Scan a list of directories for zfs devices.
*/
static int
zpool_find_import_scan(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t **slice_cache, const char * const *dir, size_t dirs)
{
avl_tree_t *cache;
rdsk_node_t *slice;
void *cookie;
int i, error;
*slice_cache = NULL;
cache = zutil_alloc(hdl, sizeof (avl_tree_t));
avl_create(cache, slice_cache_compare, sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
for (i = 0; i < dirs; i++) {
struct stat sbuf;
if (stat(dir[i], &sbuf) != 0) {
error = errno;
if (error == ENOENT)
continue;
- zutil_error_aux(hdl, strerror(error));
+ zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext(
TEXT_DOMAIN, "cannot resolve path '%s'"), dir[i]);
goto error;
}
/*
* If dir[i] is a directory, we walk through it and add all
* the entries to the cache. If it's not a directory, we just
* add it to the cache.
*/
if (S_ISDIR(sbuf.st_mode)) {
if ((error = zpool_find_import_scan_dir(hdl, lock,
cache, dir[i], i)) != 0)
goto error;
} else {
if ((error = zpool_find_import_scan_path(hdl, lock,
cache, dir[i], i)) != 0)
goto error;
}
}
*slice_cache = cache;
return (0);
error:
cookie = NULL;
while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) {
free(slice->rn_name);
free(slice);
}
free(cache);
return (error);
}
/*
* Given a list of directories to search, find all pools stored on disk. This
* includes partial pools which are not available to import. If no args are
* given (argc is 0), then the default directory (/dev/dsk) is searched.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg,
pthread_mutex_t *lock, avl_tree_t *cache)
{
nvlist_t *ret = NULL;
pool_list_t pools = { 0 };
pool_entry_t *pe, *penext;
vdev_entry_t *ve, *venext;
config_entry_t *ce, *cenext;
name_entry_t *ne, *nenext;
rdsk_node_t *slice;
void *cookie;
tpool_t *t;
verify(iarg->poolname == NULL || iarg->guid == 0);
/*
* Create a thread pool to parallelize the process of reading and
* validating labels; a large number of threads can be used because
* contention is minimal.
*/
t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
for (slice = avl_first(cache); slice;
(slice = avl_walk(cache, slice, AVL_AFTER)))
(void) tpool_dispatch(t, zpool_open_func, slice);
tpool_wait(t);
tpool_destroy(t);
/*
* Process the cache, filtering out any entries which are not
* for the specified pool then adding matching label configs.
*/
cookie = NULL;
while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) {
if (slice->rn_config != NULL) {
nvlist_t *config = slice->rn_config;
boolean_t matched = B_TRUE;
boolean_t aux = B_FALSE;
int fd;
/*
* Check if it's a spare or l2cache device. If it is,
* we need to skip the name and guid check since they
* don't exist on an aux device's label.
*/
if (iarg->poolname != NULL || iarg->guid != 0) {
uint64_t state;
aux = nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &state) == 0 &&
(state == POOL_STATE_SPARE ||
state == POOL_STATE_L2CACHE);
}
if (iarg->poolname != NULL && !aux) {
char *pname;
matched = nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &pname) == 0 &&
strcmp(iarg->poolname, pname) == 0;
} else if (iarg->guid != 0 && !aux) {
uint64_t this_guid;
matched = nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &this_guid) == 0 &&
iarg->guid == this_guid;
}
if (matched) {
/*
* Verify all remaining entries can be opened
* exclusively. This will prune all underlying
* multipath devices which otherwise could
* result in the vdev appearing as UNAVAIL.
*
* Under zdb, this step isn't required and
* would prevent a zdb -e of active pools with
* no cachefile.
*/
fd = open(slice->rn_name,
O_RDONLY | O_EXCL | O_CLOEXEC);
if (fd >= 0 || iarg->can_be_active) {
if (fd >= 0)
close(fd);
add_config(hdl, &pools,
slice->rn_name, slice->rn_order,
slice->rn_num_labels, config);
}
}
nvlist_free(config);
}
free(slice->rn_name);
free(slice);
}
avl_destroy(cache);
free(cache);
ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);
for (pe = pools.pools; pe != NULL; pe = penext) {
penext = pe->pe_next;
for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
venext = ve->ve_next;
for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
cenext = ce->ce_next;
nvlist_free(ce->ce_config);
free(ce);
}
free(ve);
}
free(pe);
}
for (ne = pools.names; ne != NULL; ne = nenext) {
nenext = ne->ne_next;
free(ne->ne_name);
free(ne);
}
return (ret);
}
/*
* Given a config, discover the paths for the devices which
* exist in the config.
*/
static int
discover_cached_paths(libpc_handle_t *hdl, nvlist_t *nv,
avl_tree_t *cache, pthread_mutex_t *lock)
{
char *path = NULL;
ssize_t dl;
uint_t children;
nvlist_t **child;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (int c = 0; c < children; c++) {
discover_cached_paths(hdl, child[c], cache, lock);
}
}
/*
* Once we have the path, we need to add the directory to
* our directory cache.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
if ((dl = zfs_dirnamelen(path)) == -1)
path = ".";
else
path[dl] = '\0';
return (zpool_find_import_scan_dir(hdl, lock, cache,
path, 0));
}
return (0);
}
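/*
* Illustrative example (hypothetical path): a cached vdev path such as
* "/dev/disk/by-id/ata-SOMEDISK-part1" contributes its directory,
* "/dev/disk/by-id", to the scan cache, so a device that was renamed
* within that directory can still be rediscovered.
*/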
/*
* Given a cache file, return the contents as a list of importable pools.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_cached(libpc_handle_t *hdl, importargs_t *iarg)
{
char *buf;
int fd;
struct stat64 statbuf;
nvlist_t *raw, *src, *dst;
nvlist_t *pools;
nvpair_t *elem;
char *name;
uint64_t this_guid;
boolean_t active;
verify(iarg->poolname == NULL || iarg->guid == 0);
if ((fd = open(iarg->cachefile, O_RDONLY | O_CLOEXEC)) < 0) {
zutil_error_aux(hdl, "%s", strerror(errno));
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "failed to open cache file"));
return (NULL);
}
if (fstat64(fd, &statbuf) != 0) {
zutil_error_aux(hdl, "%s", strerror(errno));
(void) close(fd);
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
return (NULL);
}
if ((buf = zutil_alloc(hdl, statbuf.st_size)) == NULL) {
(void) close(fd);
return (NULL);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) close(fd);
free(buf);
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN,
"failed to read cache file contents"));
return (NULL);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
free(buf);
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN,
"invalid or corrupt cache file contents"));
return (NULL);
}
free(buf);
/*
* Go through and get the current state of the pools and refresh their
* state.
*/
if (nvlist_alloc(&pools, 0, 0) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(raw);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
src = fnvpair_value_nvlist(elem);
name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
if (iarg->poolname != NULL && strcmp(iarg->poolname, name) != 0)
continue;
this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
if (iarg->guid != 0 && iarg->guid != this_guid)
continue;
if (zutil_pool_active(hdl, name, this_guid, &active) != 0) {
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if (active)
continue;
if (iarg->scan) {
uint64_t saved_guid = iarg->guid;
const char *saved_poolname = iarg->poolname;
pthread_mutex_t lock;
/*
* Create the device cache that will hold the
* devices we will scan based on the cachefile.
* This will get destroyed and freed by
* zpool_find_import_impl.
*/
avl_tree_t *cache = zutil_alloc(hdl,
sizeof (avl_tree_t));
avl_create(cache, slice_cache_compare,
sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
nvlist_t *nvroot = fnvlist_lookup_nvlist(src,
ZPOOL_CONFIG_VDEV_TREE);
/*
* We only want to find the pool with this_guid.
* We will reset these values back later.
*/
iarg->guid = this_guid;
iarg->poolname = NULL;
/*
* We need to build up a cache of devices that exist
* in the paths pointed to by the cachefile. This allows
* us to preserve the device namespace that was
* originally specified by the user, while also letting us
* scan devices in those directories in case they have
* been renamed.
*/
pthread_mutex_init(&lock, NULL);
discover_cached_paths(hdl, nvroot, cache, &lock);
nvlist_t *nv = zpool_find_import_impl(hdl, iarg,
&lock, cache);
pthread_mutex_destroy(&lock);
/*
* zpool_find_import_impl returns a list of the pools
* it found based on the device cache. There should be
* only one pool, since we're looking for a specific
* guid. We use that pool to build up the final pool
* nvlist which is returned to the caller.
*/
nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
fnvlist_add_nvlist(pools, nvpair_name(pair),
fnvpair_value_nvlist(pair));
VERIFY3P(nvlist_next_nvpair(nv, pair), ==, NULL);
iarg->guid = saved_guid;
iarg->poolname = saved_poolname;
continue;
}
if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
iarg->cachefile) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if ((dst = zutil_refresh_config(hdl, src)) == NULL) {
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(dst);
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
nvlist_free(dst);
}
nvlist_free(raw);
return (pools);
}
static nvlist_t *
zpool_find_import(libpc_handle_t *hdl, importargs_t *iarg)
{
pthread_mutex_t lock;
avl_tree_t *cache;
nvlist_t *pools = NULL;
verify(iarg->poolname == NULL || iarg->guid == 0);
pthread_mutex_init(&lock, NULL);
/*
* Locate pool member vdevs by blkid or by directory scanning.
* On success a newly allocated AVL tree which is populated with an
* entry for each discovered vdev will be returned in the cache.
* It's the caller's responsibility to consume and destroy this tree.
*/
if (iarg->scan || iarg->paths != 0) {
size_t dirs = iarg->paths;
const char * const *dir = (const char * const *)iarg->path;
if (dirs == 0)
dir = zpool_default_search_paths(&dirs);
if (zpool_find_import_scan(hdl, &lock, &cache,
dir, dirs) != 0) {
pthread_mutex_destroy(&lock);
return (NULL);
}
} else {
if (zpool_find_import_blkid(hdl, &lock, &cache) != 0) {
pthread_mutex_destroy(&lock);
return (NULL);
}
}
pools = zpool_find_import_impl(hdl, iarg, &lock, cache);
pthread_mutex_destroy(&lock);
return (pools);
}
nvlist_t *
zpool_search_import(void *hdl, importargs_t *import,
const pool_config_ops_t *pco)
{
libpc_handle_t handle = { 0 };
nvlist_t *pools = NULL;
handle.lpc_lib_handle = hdl;
handle.lpc_ops = pco;
handle.lpc_printerr = B_TRUE;
verify(import->poolname == NULL || import->guid == 0);
if (import->cachefile != NULL)
pools = zpool_find_import_cached(&handle, import);
else
pools = zpool_find_import(&handle, import);
if ((pools == NULL || nvlist_empty(pools)) &&
handle.lpc_open_access_error && geteuid() != 0) {
(void) zutil_error(&handle, EZFS_EACESS, dgettext(TEXT_DOMAIN,
"no pools found"));
}
return (pools);
}
static boolean_t
pool_match(nvlist_t *cfg, char *tgt)
{
uint64_t v, guid = strtoull(tgt, NULL, 0);
char *s;
if (guid != 0) {
if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0)
return (v == guid);
} else {
if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0)
return (strcmp(s, tgt) == 0);
}
return (B_FALSE);
}
int
zpool_find_config(void *hdl, const char *target, nvlist_t **configp,
importargs_t *args, const pool_config_ops_t *pco)
{
nvlist_t *pools;
nvlist_t *match = NULL;
nvlist_t *config = NULL;
char *sepp = NULL;
int count = 0;
char *targetdup = strdup(target);
*configp = NULL;
if ((sepp = strpbrk(targetdup, "/@")) != NULL)
*sepp = '\0';
pools = zpool_search_import(hdl, args, pco);
if (pools != NULL) {
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
VERIFY0(nvpair_value_nvlist(elem, &config));
if (pool_match(config, targetdup)) {
count++;
if (match != NULL) {
/* multiple matches found */
continue;
} else {
match = fnvlist_dup(config);
}
}
}
fnvlist_free(pools);
}
if (count == 0) {
free(targetdup);
return (ENOENT);
}
if (count > 1) {
free(targetdup);
fnvlist_free(match);
return (EINVAL);
}
*configp = match;
free(targetdup);
return (0);
}
diff --git a/sys/contrib/openzfs/man/man8/zfs-allow.8 b/sys/contrib/openzfs/man/man8/zfs-allow.8
index 070161be5413..bbd62edc2896 100644
--- a/sys/contrib/openzfs/man/man8/zfs-allow.8
+++ b/sys/contrib/openzfs/man/man8/zfs-allow.8
@@ -1,362 +1,386 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2019 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright 2019 Richard Laager. All rights reserved.
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
.Dd May 27, 2021
.Dt ZFS-ALLOW 8
.Os
.
.Sh NAME
.Nm zfs-allow
.Nd delegate ZFS administration permissions to unprivileged users
.Sh SYNOPSIS
.Nm zfs
.Cm allow
.Op Fl dglu
.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns …
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns …
.Ar filesystem Ns | Ns Ar volume
.Nm zfs
.Cm allow
.Op Fl dl
.Fl e Ns | Ns Sy everyone
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns …
.Ar filesystem Ns | Ns Ar volume
.Nm zfs
.Cm allow
.Fl c
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns …
.Ar filesystem Ns | Ns Ar volume
.Nm zfs
.Cm allow
.Fl s No @ Ns Ar setname
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns …
.Ar filesystem Ns | Ns Ar volume
.Nm zfs
.Cm unallow
.Op Fl dglru
.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns …
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns … Oc
.Ar filesystem Ns | Ns Ar volume
.Nm zfs
.Cm unallow
.Op Fl dlr
.Fl e Ns | Ns Sy everyone
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns … Oc
.Ar filesystem Ns | Ns Ar volume
.Nm zfs
.Cm unallow
.Op Fl r
.Fl c
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns … Oc
.Ar filesystem Ns | Ns Ar volume
.Nm zfs
.Cm unallow
.Op Fl r
.Fl s No @ Ns Ar setname
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns … Oc
.Ar filesystem Ns | Ns Ar volume
.
.Sh DESCRIPTION
.Bl -tag -width ""
.It Xo
.Nm zfs
.Cm allow
.Ar filesystem Ns | Ns Ar volume
.Xc
Displays permissions that have been delegated on the specified filesystem or
volume.
See the other forms of
.Nm zfs Cm allow
for more information.
.Pp
Delegations are supported under Linux with the exception of
.Sy mount ,
.Sy unmount ,
.Sy mountpoint ,
.Sy canmount ,
.Sy rename ,
and
.Sy share .
These permissions cannot be delegated because the Linux
.Xr mount 8
command restricts modifications of the global namespace to the root user.
.It Xo
.Nm zfs
.Cm allow
.Op Fl dglu
.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns …
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns …
.Ar filesystem Ns | Ns Ar volume
.Xc
.It Xo
.Nm zfs
.Cm allow
.Op Fl dl
.Fl e Ns | Ns Sy everyone
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns …
.Ar filesystem Ns | Ns Ar volume
.Xc
Delegates ZFS administration permission for the file systems to non-privileged
users.
.Bl -tag -width "-d"
.It Fl d
Allow only for the descendent file systems.
.It Fl e Ns | Ns Sy everyone
Specifies that the permissions be delegated to everyone.
.It Fl g Ar group Ns Oo , Ns Ar group Oc Ns …
Explicitly specify that permissions are delegated to the group.
.It Fl l
Allow
.Qq locally
only for the specified file system.
.It Fl u Ar user Ns Oo , Ns Ar user Oc Ns …
Explicitly specify that permissions are delegated to the user.
.It Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns …
Specifies to whom the permissions are delegated.
Multiple entities can be specified as a comma-separated list.
If neither of the
.Fl gu
options are specified, then the argument is interpreted preferentially as the
keyword
.Sy everyone ,
then as a user name, and lastly as a group name.
To specify a user or group named
.Qq everyone ,
use the
.Fl g
or
.Fl u
options.
To specify a group with the same name as a user, use the
.Fl g
option.
.It Xo
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns …
.Xc
The permissions to delegate.
Multiple permissions may be specified as a comma-separated list.
Permission names are the same as ZFS subcommand and property names.
See the property list below.
Property set names, which begin with
.Sy @ ,
may be specified.
See the
.Fl s
form below for details.
.El
.Pp
If neither of the
.Fl dl
options are specified, or both are, then the permissions are allowed for the
file system or volume, and all of its descendents.
.Pp
Permissions are generally the ability to use a ZFS subcommand or change a ZFS
property.
The following permissions are available:
.TS
l l l .
NAME TYPE NOTES
_ _ _
allow subcommand Must also have the permission that is being allowed
bookmark subcommand
clone subcommand Must also have the \fBcreate\fR ability and \fBmount\fR ability in the origin file system
create subcommand Must also have the \fBmount\fR ability. Must also have the \fBrefreservation\fR ability to create a non-sparse volume.
destroy subcommand Must also have the \fBmount\fR ability
diff subcommand Allows lookup of paths within a dataset given an object number, and the ability to create snapshots necessary to \fBzfs diff\fR.
hold subcommand Allows adding a user hold to a snapshot
-load subcommand Allows loading and unloading of encryption key (see \fBzfs load-key\fR and \fBzfs unload-key\fR).
-change subcommand Allows changing an encryption key via \fBzfs change-key\fR.
+load-key subcommand Allows loading and unloading of encryption key (see \fBzfs load-key\fR and \fBzfs unload-key\fR).
+change-key subcommand Allows changing an encryption key via \fBzfs change-key\fR.
mount subcommand Allows mounting/umounting ZFS datasets
promote subcommand Must also have the \fBmount\fR and \fBpromote\fR ability in the origin file system
receive subcommand Must also have the \fBmount\fR and \fBcreate\fR ability
release subcommand Allows releasing a user hold which might destroy the snapshot
rename subcommand Must also have the \fBmount\fR and \fBcreate\fR ability in the new parent
rollback subcommand Must also have the \fBmount\fR ability
send subcommand
share subcommand Allows sharing file systems over NFS or SMB protocols
snapshot subcommand Must also have the \fBmount\fR ability
groupquota other Allows accessing any \fBgroupquota@\fI...\fR property
+groupobjquota other Allows accessing any \fBgroupobjquota@\fI...\fR property
groupused other Allows reading any \fBgroupused@\fI...\fR property
+groupobjused other Allows reading any \fBgroupobjused@\fI...\fR property
userprop other Allows changing any user property
userquota other Allows accessing any \fBuserquota@\fI...\fR property
+userobjquota other Allows accessing any \fBuserobjquota@\fI...\fR property
userused other Allows reading any \fBuserused@\fI...\fR property
+userobjused other Allows reading any \fBuserobjused@\fI...\fR property
projectobjquota other Allows accessing any \fBprojectobjquota@\fI...\fR property
projectquota other Allows accessing any \fBprojectquota@\fI...\fR property
projectobjused other Allows reading any \fBprojectobjused@\fI...\fR property
projectused other Allows reading any \fBprojectused@\fI...\fR property
aclinherit property
+aclmode property
acltype property
atime property
canmount property
casesensitivity property
checksum property
compression property
+context property
copies property
+dedup property
+defcontext property
devices property
+dnodesize property
+encryption property
exec property
filesystem_limit property
+fscontext property
+keyformat property
+keylocation property
+logbias property
+mlslabel property
mountpoint property
nbmand property
normalization property
+overlay property
+pbkdf2iters property
primarycache property
quota property
readonly property
recordsize property
+redundant_metadata property
refquota property
refreservation property
+relatime property
reservation property
+rootcontext property
secondarycache property
setuid property
sharenfs property
sharesmb property
+snapdev property
snapdir property
snapshot_limit property
+special_small_blocks property
+sync property
utf8only property
version property
volblocksize property
+volmode property
volsize property
vscan property
xattr property
zoned property
.TE
.It Xo
.Nm zfs
.Cm allow
.Fl c
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns …
.Ar filesystem Ns | Ns Ar volume
.Xc
Sets
.Qq create time
permissions.
These permissions are granted
.Pq locally
to the creator of any newly-created descendent file system.
.It Xo
.Nm zfs
.Cm allow
.Fl s No @ Ns Ar setname
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns …
.Ar filesystem Ns | Ns Ar volume
.Xc
Defines or adds permissions to a permission set.
The set can be used by other
.Nm zfs Cm allow
commands for the specified file system and its descendents.
Sets are evaluated dynamically, so changes to a set are immediately reflected.
Permission sets follow the same naming restrictions as ZFS file systems, but the
name must begin with
.Sy @ ,
and can be no more than 64 characters long.
.It Xo
.Nm zfs
.Cm unallow
.Op Fl dglru
.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns …
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns … Oc
.Ar filesystem Ns | Ns Ar volume
.Xc
.It Xo
.Nm zfs
.Cm unallow
.Op Fl dlr
.Fl e Ns | Ns Sy everyone
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns … Oc
.Ar filesystem Ns | Ns Ar volume
.Xc
.It Xo
.Nm zfs
.Cm unallow
.Op Fl r
.Fl c
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns … Oc
.Ar filesystem Ns | Ns Ar volume
.Xc
Removes permissions that were granted with the
.Nm zfs Cm allow
command.
No permissions are explicitly denied, so other permissions that were granted
remain in effect; for example, a permission granted by an ancestor still applies.
If no permissions are specified, then all permissions for the specified
.Ar user ,
.Ar group ,
or
.Sy everyone
are removed.
Specifying
.Sy everyone
.Po or using the
.Fl e
option
.Pc
only removes the permissions that were granted to everyone, not all permissions
for every user and group.
See the
.Nm zfs Cm allow
command for a description of the
.Fl ldugec
options.
.Bl -tag -width "-r"
.It Fl r
Recursively remove the permissions from this file system and all descendents.
.El
.It Xo
.Nm zfs
.Cm unallow
.Op Fl r
.Fl s No @ Ns Ar setname
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns … Oc
.Ar filesystem Ns | Ns Ar volume
.Xc
Removes permissions from a permission set.
If no permissions are specified, then all permissions are removed, thus removing
the set entirely.
.El
diff --git a/sys/contrib/openzfs/man/man8/zpool-scrub.8 b/sys/contrib/openzfs/man/man8/zpool-scrub.8
index 10375b6393ac..768f71539290 100644
--- a/sys/contrib/openzfs/man/man8/zpool-scrub.8
+++ b/sys/contrib/openzfs/man/man8/zpool-scrub.8
@@ -1,98 +1,123 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
-.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
+.\" Copyright (c) 2018, 2021 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd May 27, 2021
+.Dd July 25, 2021
.Dt ZPOOL-SCRUB 8
.Os
.
.Sh NAME
.Nm zpool-scrub
.Nd begin or resume scrub of ZFS storage pools
.Sh SYNOPSIS
.Nm zpool
.Cm scrub
.Op Fl s Ns | Ns Fl p
.Op Fl w
.Ar pool Ns …
.
.Sh DESCRIPTION
Begins a scrub or resumes a paused scrub.
The scrub examines all data in the specified pools to verify that it checksums
correctly.
For replicated
.Pq mirror, raidz, or draid
devices, ZFS automatically repairs any damage discovered during the scrub.
The
.Nm zpool Cm status
command reports the progress of the scrub and summarizes the results of the
scrub upon completion.
.Pp
Scrubbing and resilvering are very similar operations.
The difference is that resilvering only examines data that ZFS knows to be out
of date
.Po
for example, when attaching a new device to a mirror or replacing an existing
device
.Pc ,
whereas scrubbing examines all data to discover silent errors due to hardware
faults or disk failure.
.Pp
Because scrubbing and resilvering are I/O-intensive operations, ZFS only allows
one at a time.
+.Pp
+A scrub is split into two parts: metadata scanning and block scrubbing.
+The metadata scanning sorts blocks into large sequential ranges which can then
+be read much more efficiently from disk when issuing the scrub I/O.
+.Pp
If a scrub is paused, the
.Nm zpool Cm scrub
resumes it.
If a resilver is in progress, ZFS does not allow a scrub to be started until the
resilver completes.
.Pp
Note that, due to changes in pool data on a live system, it is possible for
scrubs to progress slightly beyond 100% completion.
During this period, no completion time estimate will be provided.
.
.Sh OPTIONS
.Bl -tag -width "-s"
.It Fl s
Stop scrubbing.
.It Fl p
Pause scrubbing.
Scrub pause state and progress are periodically synced to disk.
If the system is restarted or pool is exported during a paused scrub,
even after import, scrub will remain paused until it is resumed.
Once resumed the scrub will pick up from the place where it was last
checkpointed to disk.
To resume a paused scrub issue
.Nm zpool Cm scrub
again.
.It Fl w
Wait until scrub has completed before returning.
.El
+.Sh EXAMPLES
+.Bl -tag -width "Exam"
+.It Sy Example 1 : Status of a pool with an ongoing scrub
+Output:
+.Bd -literal -compact -offset Ds
+.No # Nm zpool Cm status
+ ...
+ scan: scrub in progress since Sun Jul 25 16:07:49 2021
+ 403M scanned at 100M/s, 68.4M issued at 10.0M/s, 405M total
+ 0B repaired, 16.91% done, 00:00:04 to go
+ ...
+.Ed
+Where:
+.Bl -dash -offset indent
+.It
+Metadata which references 403M of file data has been
+scanned at 100M/s, and 68.4M of that file data has been
+scrubbed sequentially at 10.0M/s.
+.El
+.El
.
.Sh SEE ALSO
.Xr zpool-iostat 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-status 8
diff --git a/sys/contrib/openzfs/module/icp/algs/skein/skein_impl.h b/sys/contrib/openzfs/module/icp/algs/skein/skein_impl.h
index 205a517d69db..2f6307fa7b55 100644
--- a/sys/contrib/openzfs/module/icp/algs/skein/skein_impl.h
+++ b/sys/contrib/openzfs/module/icp/algs/skein/skein_impl.h
@@ -1,292 +1,284 @@
/*
* Internal definitions for Skein hashing.
* Source code author: Doug Whiting, 2008.
* This algorithm and source code is released to the public domain.
*
* The following compile-time switches may be defined to control some
* tradeoffs between speed, code size, error checking, and security.
*
* The "default" note explains what happens when the switch is not defined.
*
* SKEIN_DEBUG -- make callouts from inside Skein code
* to examine/display intermediate values.
* [default: no callouts (no overhead)]
*
* SKEIN_ERR_CHECK -- how error checking is handled inside Skein
* code. If not defined, most error checking
* is disabled (for performance). Otherwise,
* the switch value is interpreted as:
* 0: use assert() to flag errors
* 1: return SKEIN_FAIL to flag errors
*/
/* Copyright 2013 Doug Whiting. This code is released to the public domain. */
#ifndef _SKEIN_IMPL_H_
#define _SKEIN_IMPL_H_
#include <sys/skein.h>
#include <sys/strings.h>
-#include <sys/note.h>
#include "skein_impl.h"
#include "skein_port.h"
/*
* "Internal" Skein definitions
* -- not needed for sequential hashing API, but will be
* helpful for other uses of Skein (e.g., tree hash mode).
* -- included here so that they can be shared between
* reference and optimized code.
*/
/* tweak word T[1]: bit field starting positions */
/* offset 64 because it's the second word */
#define SKEIN_T1_BIT(BIT) ((BIT) - 64)
/* bits 112..118: level in hash tree */
#define SKEIN_T1_POS_TREE_LVL SKEIN_T1_BIT(112)
/* bit 119: partial final input byte */
#define SKEIN_T1_POS_BIT_PAD SKEIN_T1_BIT(119)
/* bits 120..125: type field */
#define SKEIN_T1_POS_BLK_TYPE SKEIN_T1_BIT(120)
/* bits 126: first block flag */
#define SKEIN_T1_POS_FIRST SKEIN_T1_BIT(126)
/* bit 127: final block flag */
#define SKEIN_T1_POS_FINAL SKEIN_T1_BIT(127)
/* tweak word T[1]: flag bit definition(s) */
#define SKEIN_T1_FLAG_FIRST (((uint64_t)1) << SKEIN_T1_POS_FIRST)
#define SKEIN_T1_FLAG_FINAL (((uint64_t)1) << SKEIN_T1_POS_FINAL)
#define SKEIN_T1_FLAG_BIT_PAD (((uint64_t)1) << SKEIN_T1_POS_BIT_PAD)
/* tweak word T[1]: tree level bit field mask */
#define SKEIN_T1_TREE_LVL_MASK (((uint64_t)0x7F) << SKEIN_T1_POS_TREE_LVL)
#define SKEIN_T1_TREE_LEVEL(n) (((uint64_t)(n)) << SKEIN_T1_POS_TREE_LVL)
/* tweak word T[1]: block type field */
#define SKEIN_BLK_TYPE_KEY (0) /* key, for MAC and KDF */
#define SKEIN_BLK_TYPE_CFG (4) /* configuration block */
#define SKEIN_BLK_TYPE_PERS (8) /* personalization string */
#define SKEIN_BLK_TYPE_PK (12) /* public key (for signature hashing) */
#define SKEIN_BLK_TYPE_KDF (16) /* key identifier for KDF */
#define SKEIN_BLK_TYPE_NONCE (20) /* nonce for PRNG */
#define SKEIN_BLK_TYPE_MSG (48) /* message processing */
#define SKEIN_BLK_TYPE_OUT (63) /* output stage */
#define SKEIN_BLK_TYPE_MASK (63) /* bit field mask */
#define SKEIN_T1_BLK_TYPE(T) \
(((uint64_t)(SKEIN_BLK_TYPE_##T)) << SKEIN_T1_POS_BLK_TYPE)
/* key, for MAC and KDF */
#define SKEIN_T1_BLK_TYPE_KEY SKEIN_T1_BLK_TYPE(KEY)
/* configuration block */
#define SKEIN_T1_BLK_TYPE_CFG SKEIN_T1_BLK_TYPE(CFG)
/* personalization string */
#define SKEIN_T1_BLK_TYPE_PERS SKEIN_T1_BLK_TYPE(PERS)
/* public key (for digital signature hashing) */
#define SKEIN_T1_BLK_TYPE_PK SKEIN_T1_BLK_TYPE(PK)
/* key identifier for KDF */
#define SKEIN_T1_BLK_TYPE_KDF SKEIN_T1_BLK_TYPE(KDF)
/* nonce for PRNG */
#define SKEIN_T1_BLK_TYPE_NONCE SKEIN_T1_BLK_TYPE(NONCE)
/* message processing */
#define SKEIN_T1_BLK_TYPE_MSG SKEIN_T1_BLK_TYPE(MSG)
/* output stage */
#define SKEIN_T1_BLK_TYPE_OUT SKEIN_T1_BLK_TYPE(OUT)
/* field bit mask */
#define SKEIN_T1_BLK_TYPE_MASK SKEIN_T1_BLK_TYPE(MASK)
#define SKEIN_T1_BLK_TYPE_CFG_FINAL \
(SKEIN_T1_BLK_TYPE_CFG | SKEIN_T1_FLAG_FINAL)
#define SKEIN_T1_BLK_TYPE_OUT_FINAL \
(SKEIN_T1_BLK_TYPE_OUT | SKEIN_T1_FLAG_FINAL)
#define SKEIN_VERSION (1)
#ifndef SKEIN_ID_STRING_LE /* allow compile-time personalization */
#define SKEIN_ID_STRING_LE (0x33414853) /* "SHA3" (little-endian) */
#endif
#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((uint64_t)(hi32)) << 32))
#define SKEIN_SCHEMA_VER SKEIN_MK_64(SKEIN_VERSION, SKEIN_ID_STRING_LE)
#define SKEIN_KS_PARITY SKEIN_MK_64(0x1BD11BDA, 0xA9FC1A22)
#define SKEIN_CFG_STR_LEN (4*8)
/* bit field definitions in config block treeInfo word */
#define SKEIN_CFG_TREE_LEAF_SIZE_POS (0)
#define SKEIN_CFG_TREE_NODE_SIZE_POS (8)
#define SKEIN_CFG_TREE_MAX_LEVEL_POS (16)
#define SKEIN_CFG_TREE_LEAF_SIZE_MSK \
(((uint64_t)0xFF) << SKEIN_CFG_TREE_LEAF_SIZE_POS)
#define SKEIN_CFG_TREE_NODE_SIZE_MSK \
(((uint64_t)0xFF) << SKEIN_CFG_TREE_NODE_SIZE_POS)
#define SKEIN_CFG_TREE_MAX_LEVEL_MSK \
(((uint64_t)0xFF) << SKEIN_CFG_TREE_MAX_LEVEL_POS)
#define SKEIN_CFG_TREE_INFO(leaf, node, maxLvl) \
((((uint64_t)(leaf)) << SKEIN_CFG_TREE_LEAF_SIZE_POS) | \
(((uint64_t)(node)) << SKEIN_CFG_TREE_NODE_SIZE_POS) | \
(((uint64_t)(maxLvl)) << SKEIN_CFG_TREE_MAX_LEVEL_POS))
/* use as treeInfo in InitExt() call for sequential processing */
#define SKEIN_CFG_TREE_INFO_SEQUENTIAL SKEIN_CFG_TREE_INFO(0, 0, 0)
/*
* Skein macros for getting/setting tweak words, etc.
* These are useful for partial input bytes, hash tree init/update, etc.
*/
#define Skein_Get_Tweak(ctxPtr, TWK_NUM) ((ctxPtr)->h.T[TWK_NUM])
#define Skein_Set_Tweak(ctxPtr, TWK_NUM, tVal) \
do { \
(ctxPtr)->h.T[TWK_NUM] = (tVal); \
- _NOTE(CONSTCOND) \
} while (0)
#define Skein_Get_T0(ctxPtr) Skein_Get_Tweak(ctxPtr, 0)
#define Skein_Get_T1(ctxPtr) Skein_Get_Tweak(ctxPtr, 1)
#define Skein_Set_T0(ctxPtr, T0) Skein_Set_Tweak(ctxPtr, 0, T0)
#define Skein_Set_T1(ctxPtr, T1) Skein_Set_Tweak(ctxPtr, 1, T1)
/* set both tweak words at once */
#define Skein_Set_T0_T1(ctxPtr, T0, T1) \
do { \
Skein_Set_T0(ctxPtr, (T0)); \
Skein_Set_T1(ctxPtr, (T1)); \
- _NOTE(CONSTCOND) \
} while (0)
#define Skein_Set_Type(ctxPtr, BLK_TYPE) \
Skein_Set_T1(ctxPtr, SKEIN_T1_BLK_TYPE_##BLK_TYPE)
/*
* set up for starting with a new type: h.T[0]=0; h.T[1] = NEW_TYPE; h.bCnt=0;
*/
#define Skein_Start_New_Type(ctxPtr, BLK_TYPE) \
do { \
Skein_Set_T0_T1(ctxPtr, 0, SKEIN_T1_FLAG_FIRST | \
SKEIN_T1_BLK_TYPE_ ## BLK_TYPE); \
(ctxPtr)->h.bCnt = 0; \
- _NOTE(CONSTCOND) \
} while (0)
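/*
* Illustrative example: Skein_Start_New_Type(ctxPtr, MSG) expands to
* Skein_Set_T0_T1(ctxPtr, 0, SKEIN_T1_FLAG_FIRST | SKEIN_T1_BLK_TYPE_MSG)
* followed by zeroing h.bCnt, i.e. the tweak marks the first block of a
* new MSG-type stream with no buffered input bytes yet.
*/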
#define Skein_Clear_First_Flag(hdr) \
do { \
(hdr).T[1] &= ~SKEIN_T1_FLAG_FIRST; \
- _NOTE(CONSTCOND) \
} while (0)
#define Skein_Set_Bit_Pad_Flag(hdr) \
do { \
(hdr).T[1] |= SKEIN_T1_FLAG_BIT_PAD; \
- _NOTE(CONSTCOND) \
} while (0)
#define Skein_Set_Tree_Level(hdr, height) \
do { \
(hdr).T[1] |= SKEIN_T1_TREE_LEVEL(height); \
- _NOTE(CONSTCOND) \
} while (0)
/*
* "Internal" Skein definitions for debugging and error checking
* Note: in Illumos we always disable debugging features.
*/
#define Skein_Show_Block(bits, ctx, X, blkPtr, wPtr, ksEvenPtr, ksOddPtr)
#define Skein_Show_Round(bits, ctx, r, X)
#define Skein_Show_R_Ptr(bits, ctx, r, X_ptr)
#define Skein_Show_Final(bits, ctx, cnt, outPtr)
#define Skein_Show_Key(bits, ctx, key, keyBytes)
/* run-time checks (e.g., bad params, uninitialized context)? */
#ifndef SKEIN_ERR_CHECK
/* default: ignore all Asserts, for performance */
#define Skein_Assert(x, retCode)
#define Skein_assert(x)
#elif defined(SKEIN_ASSERT)
#include <sys/debug.h>
#define Skein_Assert(x, retCode) ASSERT(x)
#define Skein_assert(x) ASSERT(x)
#else
#include <sys/debug.h>
/* caller error */
#define Skein_Assert(x, retCode) \
do { \
if (!(x)) \
return (retCode); \
- _NOTE(CONSTCOND) \
} while (0)
/* internal error */
#define Skein_assert(x) ASSERT(x)
#endif
/*
* Skein block function constants (shared across Ref and Opt code)
*/
enum {
/* Skein_256 round rotation constants */
R_256_0_0 = 14, R_256_0_1 = 16,
R_256_1_0 = 52, R_256_1_1 = 57,
R_256_2_0 = 23, R_256_2_1 = 40,
R_256_3_0 = 5, R_256_3_1 = 37,
R_256_4_0 = 25, R_256_4_1 = 33,
R_256_5_0 = 46, R_256_5_1 = 12,
R_256_6_0 = 58, R_256_6_1 = 22,
R_256_7_0 = 32, R_256_7_1 = 32,
/* Skein_512 round rotation constants */
R_512_0_0 = 46, R_512_0_1 = 36, R_512_0_2 = 19, R_512_0_3 = 37,
R_512_1_0 = 33, R_512_1_1 = 27, R_512_1_2 = 14, R_512_1_3 = 42,
R_512_2_0 = 17, R_512_2_1 = 49, R_512_2_2 = 36, R_512_2_3 = 39,
R_512_3_0 = 44, R_512_3_1 = 9, R_512_3_2 = 54, R_512_3_3 = 56,
R_512_4_0 = 39, R_512_4_1 = 30, R_512_4_2 = 34, R_512_4_3 = 24,
R_512_5_0 = 13, R_512_5_1 = 50, R_512_5_2 = 10, R_512_5_3 = 17,
R_512_6_0 = 25, R_512_6_1 = 29, R_512_6_2 = 39, R_512_6_3 = 43,
R_512_7_0 = 8, R_512_7_1 = 35, R_512_7_2 = 56, R_512_7_3 = 22,
/* Skein1024 round rotation constants */
R1024_0_0 = 24, R1024_0_1 = 13, R1024_0_2 = 8, R1024_0_3 =
47, R1024_0_4 = 8, R1024_0_5 = 17, R1024_0_6 = 22, R1024_0_7 = 37,
R1024_1_0 = 38, R1024_1_1 = 19, R1024_1_2 = 10, R1024_1_3 =
55, R1024_1_4 = 49, R1024_1_5 = 18, R1024_1_6 = 23, R1024_1_7 = 52,
R1024_2_0 = 33, R1024_2_1 = 4, R1024_2_2 = 51, R1024_2_3 =
13, R1024_2_4 = 34, R1024_2_5 = 41, R1024_2_6 = 59, R1024_2_7 = 17,
R1024_3_0 = 5, R1024_3_1 = 20, R1024_3_2 = 48, R1024_3_3 =
41, R1024_3_4 = 47, R1024_3_5 = 28, R1024_3_6 = 16, R1024_3_7 = 25,
R1024_4_0 = 41, R1024_4_1 = 9, R1024_4_2 = 37, R1024_4_3 =
31, R1024_4_4 = 12, R1024_4_5 = 47, R1024_4_6 = 44, R1024_4_7 = 30,
R1024_5_0 = 16, R1024_5_1 = 34, R1024_5_2 = 56, R1024_5_3 =
51, R1024_5_4 = 4, R1024_5_5 = 53, R1024_5_6 = 42, R1024_5_7 = 41,
R1024_6_0 = 31, R1024_6_1 = 44, R1024_6_2 = 47, R1024_6_3 =
46, R1024_6_4 = 19, R1024_6_5 = 42, R1024_6_6 = 44, R1024_6_7 = 25,
R1024_7_0 = 9, R1024_7_1 = 48, R1024_7_2 = 35, R1024_7_3 =
52, R1024_7_4 = 23, R1024_7_5 = 31, R1024_7_6 = 37, R1024_7_7 = 20
};
/* number of rounds for the different block sizes */
#define SKEIN_256_ROUNDS_TOTAL (72)
#define SKEIN_512_ROUNDS_TOTAL (72)
#define SKEIN1024_ROUNDS_TOTAL (80)
extern const uint64_t SKEIN_256_IV_128[];
extern const uint64_t SKEIN_256_IV_160[];
extern const uint64_t SKEIN_256_IV_224[];
extern const uint64_t SKEIN_256_IV_256[];
extern const uint64_t SKEIN_512_IV_128[];
extern const uint64_t SKEIN_512_IV_160[];
extern const uint64_t SKEIN_512_IV_224[];
extern const uint64_t SKEIN_512_IV_256[];
extern const uint64_t SKEIN_512_IV_384[];
extern const uint64_t SKEIN_512_IV_512[];
extern const uint64_t SKEIN1024_IV_384[];
extern const uint64_t SKEIN1024_IV_512[];
extern const uint64_t SKEIN1024_IV_1024[];
/* Functions to process blkCnt (nonzero) full block(s) of data. */
void Skein_256_Process_Block(Skein_256_Ctxt_t *ctx, const uint8_t *blkPtr,
size_t blkCnt, size_t byteCntAdd);
void Skein_512_Process_Block(Skein_512_Ctxt_t *ctx, const uint8_t *blkPtr,
size_t blkCnt, size_t byteCntAdd);
void Skein1024_Process_Block(Skein1024_Ctxt_t *ctx, const uint8_t *blkPtr,
size_t blkCnt, size_t byteCntAdd);
#endif /* _SKEIN_IMPL_H_ */
diff --git a/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c b/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c
index 94e6937bcd76..139b6920e1ec 100644
--- a/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c
+++ b/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c
@@ -1,645 +1,645 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* This file is part of the core Kernel Cryptographic Framework.
* It implements the management of tables of providers. Entries are
* added and removed when cryptographic providers register with
* and unregister from the framework, respectively. The KCF scheduler
* and ioctl pseudo driver call these functions to obtain the list
* of available providers.
*
* The provider table is indexed by crypto_provider_id_t. Each
* element of the table contains a pointer to a provider descriptor,
* or NULL if the entry is free.
*
* This file also implements helper functions to allocate and free
* provider descriptors.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>
#define KCF_MAX_PROVIDERS 512 /* max number of providers */
/*
* Prov_tab is an array of providers which is updated when
* a crypto provider registers with kcf. The provider calls the
* SPI routine, crypto_register_provider(), which in turn calls
* kcf_prov_tab_add_provider().
*
* A provider unregisters by calling crypto_unregister_provider()
* which triggers the removal of the prov_tab entry.
* It also calls kcf_remove_mech_provider().
*
* prov_tab entries are not updated from kcf.conf or by cryptoadm(1M).
*/
static kcf_provider_desc_t **prov_tab = NULL;
static kmutex_t prov_tab_mutex; /* ensure exclusive access to the table */
static uint_t prov_tab_num = 0; /* number of providers in table */
static uint_t prov_tab_max = KCF_MAX_PROVIDERS;
void
kcf_prov_tab_destroy(void)
{
mutex_destroy(&prov_tab_mutex);
if (prov_tab)
kmem_free(prov_tab, prov_tab_max *
sizeof (kcf_provider_desc_t *));
}
/*
* Initialize a mutex and the KCF providers table, prov_tab.
* The providers table is dynamically allocated with prov_tab_max entries.
* Called from kcf module _init().
*/
void
kcf_prov_tab_init(void)
{
mutex_init(&prov_tab_mutex, NULL, MUTEX_DEFAULT, NULL);
prov_tab = kmem_zalloc(prov_tab_max * sizeof (kcf_provider_desc_t *),
KM_SLEEP);
}
/*
* Add a provider to the provider table. If no free entry can be found
* for the new provider, returns CRYPTO_HOST_MEMORY. Otherwise, add
* the provider to the table, initialize the pd_prov_id field
* of the specified provider descriptor to the index in that table,
* and return CRYPTO_SUCCESS. Note that a REFHOLD is done on the
* provider when pointed to by a table entry.
*/
int
kcf_prov_tab_add_provider(kcf_provider_desc_t *prov_desc)
{
uint_t i;
ASSERT(prov_tab != NULL);
mutex_enter(&prov_tab_mutex);
/* find free slot in providers table */
for (i = 1; i < KCF_MAX_PROVIDERS && prov_tab[i] != NULL; i++)
;
if (i == KCF_MAX_PROVIDERS) {
/* ran out of providers entries */
mutex_exit(&prov_tab_mutex);
cmn_err(CE_WARN, "out of providers entries");
return (CRYPTO_HOST_MEMORY);
}
/* initialize entry */
prov_tab[i] = prov_desc;
KCF_PROV_REFHOLD(prov_desc);
KCF_PROV_IREFHOLD(prov_desc);
prov_tab_num++;
mutex_exit(&prov_tab_mutex);
/* update provider descriptor */
prov_desc->pd_prov_id = i;
/*
* The KCF-private provider handle is defined as the internal
* provider id.
*/
prov_desc->pd_kcf_prov_handle =
(crypto_kcf_provider_handle_t)prov_desc->pd_prov_id;
return (CRYPTO_SUCCESS);
}
/*
* Remove the provider specified by its id. A REFRELE is done on the
* corresponding provider descriptor before this function returns.
* Returns CRYPTO_UNKNOWN_PROVIDER if the provider id is not valid.
*/
int
kcf_prov_tab_rem_provider(crypto_provider_id_t prov_id)
{
kcf_provider_desc_t *prov_desc;
ASSERT(prov_tab != NULL);
ASSERT(prov_tab_num >= 0);
/*
* Validate provider id, since it can be specified by a 3rd-party
* provider.
*/
mutex_enter(&prov_tab_mutex);
if (prov_id >= KCF_MAX_PROVIDERS ||
((prov_desc = prov_tab[prov_id]) == NULL)) {
mutex_exit(&prov_tab_mutex);
return (CRYPTO_INVALID_PROVIDER_ID);
}
mutex_exit(&prov_tab_mutex);
/*
* The provider id must remain valid until the associated provider
* descriptor is freed. For this reason, we simply release our
* reference to the descriptor here. When the reference count
* reaches zero, kcf_free_provider_desc() will be invoked and
* the associated entry in the providers table will be released
* at that time.
*/
KCF_PROV_REFRELE(prov_desc);
KCF_PROV_IREFRELE(prov_desc);
return (CRYPTO_SUCCESS);
}
/*
* Returns the provider descriptor corresponding to the specified
* provider id. A REFHOLD is done on the descriptor before it is
* returned to the caller. It is the responsibility of the caller
* to do a REFRELE once it is done with the provider descriptor.
*/
kcf_provider_desc_t *
kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
{
kcf_provider_desc_t *prov_desc;
mutex_enter(&prov_tab_mutex);
prov_desc = prov_tab[prov_id];
if (prov_desc == NULL) {
mutex_exit(&prov_tab_mutex);
return (NULL);
}
KCF_PROV_REFHOLD(prov_desc);
mutex_exit(&prov_tab_mutex);
return (prov_desc);
}
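/*
* Illustrative usage sketch: the implicit REFHOLD taken by
* kcf_prov_tab_lookup() is paired with an explicit release once the
* descriptor is no longer needed.
*
* kcf_provider_desc_t *pd = kcf_prov_tab_lookup(prov_id);
* if (pd != NULL) {
*	... use pd ...
*	KCF_PROV_REFRELE(pd);
* }
*/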
static void
allocate_ops_v1(crypto_ops_t *src, crypto_ops_t *dst, uint_t *mech_list_count)
{
if (src->co_control_ops != NULL)
dst->co_control_ops = kmem_alloc(sizeof (crypto_control_ops_t),
KM_SLEEP);
if (src->co_digest_ops != NULL)
dst->co_digest_ops = kmem_alloc(sizeof (crypto_digest_ops_t),
KM_SLEEP);
if (src->co_cipher_ops != NULL)
dst->co_cipher_ops = kmem_alloc(sizeof (crypto_cipher_ops_t),
KM_SLEEP);
if (src->co_mac_ops != NULL)
dst->co_mac_ops = kmem_alloc(sizeof (crypto_mac_ops_t),
KM_SLEEP);
if (src->co_sign_ops != NULL)
dst->co_sign_ops = kmem_alloc(sizeof (crypto_sign_ops_t),
KM_SLEEP);
if (src->co_verify_ops != NULL)
dst->co_verify_ops = kmem_alloc(sizeof (crypto_verify_ops_t),
KM_SLEEP);
if (src->co_dual_ops != NULL)
dst->co_dual_ops = kmem_alloc(sizeof (crypto_dual_ops_t),
KM_SLEEP);
if (src->co_dual_cipher_mac_ops != NULL)
dst->co_dual_cipher_mac_ops = kmem_alloc(
sizeof (crypto_dual_cipher_mac_ops_t), KM_SLEEP);
if (src->co_random_ops != NULL) {
dst->co_random_ops = kmem_alloc(
sizeof (crypto_random_number_ops_t), KM_SLEEP);
/*
* Allocate storage for the array of supported mechanisms
* specified by the provider. We allocate extra mechanism storage
* if the provider has random_ops since we keep an internal
* mechanism, SUN_RANDOM, in this case.
*/
(*mech_list_count)++;
}
if (src->co_session_ops != NULL)
dst->co_session_ops = kmem_alloc(sizeof (crypto_session_ops_t),
KM_SLEEP);
if (src->co_object_ops != NULL)
dst->co_object_ops = kmem_alloc(sizeof (crypto_object_ops_t),
KM_SLEEP);
if (src->co_key_ops != NULL)
dst->co_key_ops = kmem_alloc(sizeof (crypto_key_ops_t),
KM_SLEEP);
if (src->co_provider_ops != NULL)
dst->co_provider_ops = kmem_alloc(
sizeof (crypto_provider_management_ops_t), KM_SLEEP);
if (src->co_ctx_ops != NULL)
dst->co_ctx_ops = kmem_alloc(sizeof (crypto_ctx_ops_t),
KM_SLEEP);
}
static void
allocate_ops_v2(crypto_ops_t *src, crypto_ops_t *dst)
{
if (src->co_mech_ops != NULL)
dst->co_mech_ops = kmem_alloc(sizeof (crypto_mech_ops_t),
KM_SLEEP);
}
static void
allocate_ops_v3(crypto_ops_t *src, crypto_ops_t *dst)
{
if (src->co_nostore_key_ops != NULL)
dst->co_nostore_key_ops =
kmem_alloc(sizeof (crypto_nostore_key_ops_t), KM_SLEEP);
}
/*
* Allocate a provider descriptor. mech_list_count specifies the
* number of mechanisms supported by the providers, and is used
* to allocate storage for the mechanism table.
* This function may sleep while allocating memory, which is OK
* since it is invoked from user context during provider registration.
*/
kcf_provider_desc_t *
kcf_alloc_provider_desc(crypto_provider_info_t *info)
{
int i, j;
kcf_provider_desc_t *desc;
uint_t mech_list_count = info->pi_mech_list_count;
crypto_ops_t *src_ops = info->pi_ops_vector;
desc = kmem_zalloc(sizeof (kcf_provider_desc_t), KM_SLEEP);
/*
* pd_description serves two purposes
* - Appears as a blank padded PKCS#11 style string, that will be
* returned to applications in CK_SLOT_INFO.slotDescription.
* This means that we should not have a null character in the
* first CRYPTO_PROVIDER_DESCR_MAX_LEN bytes.
* - Appears as a null-terminated string that can be used by
* other kcf routines.
*
* So, we allocate enough room for one extra null terminator
* which keeps everyone happy.
*/
desc->pd_description = kmem_alloc(CRYPTO_PROVIDER_DESCR_MAX_LEN + 1,
KM_SLEEP);
(void) memset(desc->pd_description, ' ',
CRYPTO_PROVIDER_DESCR_MAX_LEN);
desc->pd_description[CRYPTO_PROVIDER_DESCR_MAX_LEN] = '\0';
/*
* Since the framework does not require the ops vector specified
* by a provider during registration to be persistent, KCF
* allocates its own storage and copies the ops vector into it.
*/
desc->pd_ops_vector = kmem_zalloc(sizeof (crypto_ops_t), KM_SLEEP);
if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
allocate_ops_v1(src_ops, desc->pd_ops_vector, &mech_list_count);
if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2)
allocate_ops_v2(src_ops, desc->pd_ops_vector);
if (info->pi_interface_version == CRYPTO_SPI_VERSION_3)
allocate_ops_v3(src_ops, desc->pd_ops_vector);
}
desc->pd_mech_list_count = mech_list_count;
desc->pd_mechanisms = kmem_zalloc(sizeof (crypto_mech_info_t) *
mech_list_count, KM_SLEEP);
for (i = 0; i < KCF_OPS_CLASSSIZE; i++)
for (j = 0; j < KCF_MAXMECHTAB; j++)
desc->pd_mech_indx[i][j] = KCF_INVALID_INDX;
desc->pd_prov_id = KCF_PROVID_INVALID;
desc->pd_state = KCF_PROV_ALLOCATED;
mutex_init(&desc->pd_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&desc->pd_resume_cv, NULL, CV_DEFAULT, NULL);
cv_init(&desc->pd_remove_cv, NULL, CV_DEFAULT, NULL);
return (desc);
}
/*
* Called by KCF_PROV_REFRELE when a provider's reference count drops
* to zero. We free the descriptor when the last reference is released.
* However, for software providers, we do not free it when there is an
* unregister thread waiting. We signal that thread in this case and
* that thread is responsible for freeing the descriptor.
*/
void
kcf_provider_zero_refcnt(kcf_provider_desc_t *desc)
{
mutex_enter(&desc->pd_lock);
switch (desc->pd_prov_type) {
case CRYPTO_SW_PROVIDER:
if (desc->pd_state == KCF_PROV_REMOVED ||
desc->pd_state == KCF_PROV_DISABLED) {
desc->pd_state = KCF_PROV_FREED;
cv_broadcast(&desc->pd_remove_cv);
mutex_exit(&desc->pd_lock);
break;
}
- /* FALLTHRU */
+ /* FALLTHROUGH */
case CRYPTO_HW_PROVIDER:
case CRYPTO_LOGICAL_PROVIDER:
mutex_exit(&desc->pd_lock);
kcf_free_provider_desc(desc);
}
}
/*
* Free a provider descriptor.
*/
void
kcf_free_provider_desc(kcf_provider_desc_t *desc)
{
if (desc == NULL)
return;
mutex_enter(&prov_tab_mutex);
if (desc->pd_prov_id != KCF_PROVID_INVALID) {
/* release the associated providers table entry */
ASSERT(prov_tab[desc->pd_prov_id] != NULL);
prov_tab[desc->pd_prov_id] = NULL;
prov_tab_num--;
}
mutex_exit(&prov_tab_mutex);
/* free the kernel memory associated with the provider descriptor */
if (desc->pd_description != NULL)
kmem_free(desc->pd_description,
CRYPTO_PROVIDER_DESCR_MAX_LEN + 1);
if (desc->pd_ops_vector != NULL) {
if (desc->pd_ops_vector->co_control_ops != NULL)
kmem_free(desc->pd_ops_vector->co_control_ops,
sizeof (crypto_control_ops_t));
if (desc->pd_ops_vector->co_digest_ops != NULL)
kmem_free(desc->pd_ops_vector->co_digest_ops,
sizeof (crypto_digest_ops_t));
if (desc->pd_ops_vector->co_cipher_ops != NULL)
kmem_free(desc->pd_ops_vector->co_cipher_ops,
sizeof (crypto_cipher_ops_t));
if (desc->pd_ops_vector->co_mac_ops != NULL)
kmem_free(desc->pd_ops_vector->co_mac_ops,
sizeof (crypto_mac_ops_t));
if (desc->pd_ops_vector->co_sign_ops != NULL)
kmem_free(desc->pd_ops_vector->co_sign_ops,
sizeof (crypto_sign_ops_t));
if (desc->pd_ops_vector->co_verify_ops != NULL)
kmem_free(desc->pd_ops_vector->co_verify_ops,
sizeof (crypto_verify_ops_t));
if (desc->pd_ops_vector->co_dual_ops != NULL)
kmem_free(desc->pd_ops_vector->co_dual_ops,
sizeof (crypto_dual_ops_t));
if (desc->pd_ops_vector->co_dual_cipher_mac_ops != NULL)
kmem_free(desc->pd_ops_vector->co_dual_cipher_mac_ops,
sizeof (crypto_dual_cipher_mac_ops_t));
if (desc->pd_ops_vector->co_random_ops != NULL)
kmem_free(desc->pd_ops_vector->co_random_ops,
sizeof (crypto_random_number_ops_t));
if (desc->pd_ops_vector->co_session_ops != NULL)
kmem_free(desc->pd_ops_vector->co_session_ops,
sizeof (crypto_session_ops_t));
if (desc->pd_ops_vector->co_object_ops != NULL)
kmem_free(desc->pd_ops_vector->co_object_ops,
sizeof (crypto_object_ops_t));
if (desc->pd_ops_vector->co_key_ops != NULL)
kmem_free(desc->pd_ops_vector->co_key_ops,
sizeof (crypto_key_ops_t));
if (desc->pd_ops_vector->co_provider_ops != NULL)
kmem_free(desc->pd_ops_vector->co_provider_ops,
sizeof (crypto_provider_management_ops_t));
if (desc->pd_ops_vector->co_ctx_ops != NULL)
kmem_free(desc->pd_ops_vector->co_ctx_ops,
sizeof (crypto_ctx_ops_t));
if (desc->pd_ops_vector->co_mech_ops != NULL)
kmem_free(desc->pd_ops_vector->co_mech_ops,
sizeof (crypto_mech_ops_t));
if (desc->pd_ops_vector->co_nostore_key_ops != NULL)
kmem_free(desc->pd_ops_vector->co_nostore_key_ops,
sizeof (crypto_nostore_key_ops_t));
kmem_free(desc->pd_ops_vector, sizeof (crypto_ops_t));
}
if (desc->pd_mechanisms != NULL)
/* free the memory associated with the mechanism info structures */
kmem_free(desc->pd_mechanisms, sizeof (crypto_mech_info_t) *
desc->pd_mech_list_count);
if (desc->pd_sched_info.ks_taskq != NULL)
taskq_destroy(desc->pd_sched_info.ks_taskq);
mutex_destroy(&desc->pd_lock);
cv_destroy(&desc->pd_resume_cv);
cv_destroy(&desc->pd_remove_cv);
kmem_free(desc, sizeof (kcf_provider_desc_t));
}
/*
* Returns an array of hardware and logical provider descriptors,
* a.k.a. the PKCS#11 slot list. A REFHOLD is done on each descriptor
* before the array is returned. The entire table can be freed by
* calling kcf_free_provider_tab().
*/
int
kcf_get_slot_list(uint_t *count, kcf_provider_desc_t ***array,
boolean_t unverified)
{
kcf_provider_desc_t *prov_desc;
kcf_provider_desc_t **p = NULL;
char *last;
uint_t cnt = 0;
uint_t i, j;
int rval = CRYPTO_SUCCESS;
size_t n, final_size;
/* count the providers */
mutex_enter(&prov_tab_mutex);
for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
if ((prov_desc = prov_tab[i]) != NULL &&
((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
(prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
if (KCF_IS_PROV_USABLE(prov_desc) ||
(unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
cnt++;
}
}
}
mutex_exit(&prov_tab_mutex);
if (cnt == 0)
goto out;
n = cnt * sizeof (kcf_provider_desc_t *);
again:
p = kmem_zalloc(n, KM_SLEEP);
/* pointer to last entry in the array */
last = (char *)&p[cnt-1];
mutex_enter(&prov_tab_mutex);
/* fill the slot list */
for (i = 0, j = 0; i < KCF_MAX_PROVIDERS; i++) {
if ((prov_desc = prov_tab[i]) != NULL &&
((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
(prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
if (KCF_IS_PROV_USABLE(prov_desc) ||
(unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
if ((char *)&p[j] > last) {
mutex_exit(&prov_tab_mutex);
kcf_free_provider_tab(cnt, p);
n = n << 1;
cnt = cnt << 1;
goto again;
}
p[j++] = prov_desc;
KCF_PROV_REFHOLD(prov_desc);
}
}
}
mutex_exit(&prov_tab_mutex);
final_size = j * sizeof (kcf_provider_desc_t *);
cnt = j;
ASSERT(final_size <= n);
/* check if buffer we allocated is too large */
if (final_size < n) {
char *final_buffer = NULL;
if (final_size > 0) {
final_buffer = kmem_alloc(final_size, KM_SLEEP);
bcopy(p, final_buffer, final_size);
}
kmem_free(p, n);
p = (kcf_provider_desc_t **)final_buffer;
}
out:
*count = cnt;
*array = p;
return (rval);
}
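/*
 * Illustrative sketch only (the function name is hypothetical): walking the
 * slot list returned by kcf_get_slot_list(). Every entry comes back held,
 * so the whole table is released in a single kcf_free_provider_tab() call.
 * Guarded by #if 0 since it is illustration, not functional code.
 */
#if 0
static void
example_walk_slots(void)
{
	kcf_provider_desc_t **slots = NULL;
	uint_t count = 0, i;

	if (kcf_get_slot_list(&count, &slots, B_FALSE) != CRYPTO_SUCCESS)
		return;

	for (i = 0; i < count; i++) {
		/* ... inspect slots[i], e.g. slots[i]->pd_description ... */
	}

	if (count > 0)
		kcf_free_provider_tab(count, slots);	/* REFRELEs + frees */
}
#endif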
/*
* Free an array of hardware provider descriptors. A REFRELE
* is done on each descriptor before the table is freed.
*/
void
kcf_free_provider_tab(uint_t count, kcf_provider_desc_t **array)
{
kcf_provider_desc_t *prov_desc;
int i;
for (i = 0; i < count; i++) {
if ((prov_desc = array[i]) != NULL) {
KCF_PROV_REFRELE(prov_desc);
}
}
kmem_free(array, count * sizeof (kcf_provider_desc_t *));
}
/*
* Returns in the location pointed to by pd a pointer to the descriptor
* for the software provider for the specified mechanism.
* The provider descriptor is returned held and it is the caller's
* responsibility to release it when done. The mechanism entry
* is returned if the optional argument mep is non-NULL.
*
* Returns one of the CRYPTO_* error codes on failure, and
* CRYPTO_SUCCESS on success.
*/
int
kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
kcf_mech_entry_t **mep, boolean_t log_warn)
{
kcf_mech_entry_t *me;
/* get the mechanism entry for this mechanism */
if (kcf_get_mech_entry(mech_type, &me) != KCF_SUCCESS)
return (CRYPTO_MECHANISM_INVALID);
/*
* Get the software provider for this mechanism.
* Lock the mech_entry until we grab the 'pd'.
*/
mutex_enter(&me->me_mutex);
if (me->me_sw_prov == NULL ||
(*pd = me->me_sw_prov->pm_prov_desc) == NULL) {
/* no SW provider for this mechanism */
if (log_warn)
cmn_err(CE_WARN, "no SW provider for \"%s\"\n",
me->me_name);
mutex_exit(&me->me_mutex);
return (CRYPTO_MECH_NOT_SUPPORTED);
}
KCF_PROV_REFHOLD(*pd);
mutex_exit(&me->me_mutex);
if (mep != NULL)
*mep = me;
return (CRYPTO_SUCCESS);
}
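/*
 * Illustrative sketch only (the wrapper name is hypothetical): resolving the
 * software provider for a mechanism and releasing it afterwards, per the
 * contract described above. Guarded by #if 0 since it is illustration, not
 * functional code.
 */
#if 0
static int
example_use_sw_prov(crypto_mech_type_t mech_type)
{
	kcf_provider_desc_t *pd;
	int rv;

	rv = kcf_get_sw_prov(mech_type, &pd, NULL, B_TRUE);
	if (rv != CRYPTO_SUCCESS)
		return (rv);

	/* ... submit the request to the held software provider ... */

	KCF_PROV_REFRELE(pd);
	return (CRYPTO_SUCCESS);
}
#endif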
diff --git a/sys/contrib/openzfs/module/icp/io/aes.c b/sys/contrib/openzfs/module/icp/io/aes.c
index e540af4473f7..f77583360235 100644
--- a/sys/contrib/openzfs/module/icp/io/aes.c
+++ b/sys/contrib/openzfs/module/icp/io/aes.c
@@ -1,1457 +1,1457 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* AES provider for the Kernel Cryptographic Framework (KCF)
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/icp.h>
#include <modes/modes.h>
#include <sys/modctl.h>
#define _AES_IMPL
#include <aes/aes_impl.h>
#include <modes/gcm_impl.h>
#define CRYPTO_PROVIDER_NAME "aes"
extern struct mod_ops mod_cryptoops;
/*
* Module linkage information for the kernel.
*/
static struct modlcrypto modlcrypto = {
&mod_cryptoops,
"AES Kernel SW Provider"
};
static struct modlinkage modlinkage = {
MODREV_1, { (void *)&modlcrypto, NULL }
};
/*
* Mechanism info structure passed to KCF during registration.
*/
static crypto_mech_info_t aes_mech_info_tab[] = {
/* AES_ECB */
{SUN_CKM_AES_ECB, AES_ECB_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* AES_CBC */
{SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* AES_CTR */
{SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* AES_CCM */
{SUN_CKM_AES_CCM, AES_CCM_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* AES_GCM */
{SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* AES_GMAC */
{SUN_CKM_AES_GMAC, AES_GMAC_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC |
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC |
CRYPTO_FG_SIGN | CRYPTO_FG_SIGN_ATOMIC |
CRYPTO_FG_VERIFY | CRYPTO_FG_VERIFY_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
static void aes_provider_status(crypto_provider_handle_t, uint_t *);
static crypto_control_ops_t aes_control_ops = {
aes_provider_status
};
static int aes_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t, boolean_t);
static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *,
crypto_mechanism_t *, crypto_key_t *, int, boolean_t);
static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *,
crypto_req_handle_t);
static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *,
crypto_req_handle_t);
static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
crypto_req_handle_t);
static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *,
crypto_data_t *, crypto_req_handle_t);
static int aes_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
crypto_req_handle_t);
static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *,
crypto_data_t *, crypto_req_handle_t);
static int aes_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static crypto_cipher_ops_t aes_cipher_ops = {
.encrypt_init = aes_encrypt_init,
.encrypt = aes_encrypt,
.encrypt_update = aes_encrypt_update,
.encrypt_final = aes_encrypt_final,
.encrypt_atomic = aes_encrypt_atomic,
.decrypt_init = aes_decrypt_init,
.decrypt = aes_decrypt,
.decrypt_update = aes_decrypt_update,
.decrypt_final = aes_decrypt_final,
.decrypt_atomic = aes_decrypt_atomic
};
static int aes_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
crypto_spi_ctx_template_t, crypto_req_handle_t);
static crypto_mac_ops_t aes_mac_ops = {
.mac_init = NULL,
.mac = NULL,
.mac_update = NULL,
.mac_final = NULL,
.mac_atomic = aes_mac_atomic,
.mac_verify_atomic = aes_mac_verify_atomic
};
static int aes_create_ctx_template(crypto_provider_handle_t,
crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
size_t *, crypto_req_handle_t);
static int aes_free_context(crypto_ctx_t *);
static crypto_ctx_ops_t aes_ctx_ops = {
.create_ctx_template = aes_create_ctx_template,
.free_context = aes_free_context
};
static crypto_ops_t aes_crypto_ops = {{{{{
&aes_control_ops,
NULL,
&aes_cipher_ops,
&aes_mac_ops,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
&aes_ctx_ops
}}}}};
static crypto_provider_info_t aes_prov_info = {{{{
CRYPTO_SPI_VERSION_1,
"AES Software Provider",
CRYPTO_SW_PROVIDER,
NULL,
&aes_crypto_ops,
sizeof (aes_mech_info_tab)/sizeof (crypto_mech_info_t),
aes_mech_info_tab
}}}};
static crypto_kcf_provider_handle_t aes_prov_handle = 0;
static crypto_data_t null_crypto_data = { CRYPTO_DATA_RAW };
int
aes_mod_init(void)
{
int ret;
/* Determine the fastest available implementation. */
aes_impl_init();
gcm_impl_init();
if ((ret = mod_install(&modlinkage)) != 0)
return (ret);
/* Register with KCF. If the registration fails, remove the module. */
if (crypto_register_provider(&aes_prov_info, &aes_prov_handle)) {
(void) mod_remove(&modlinkage);
return (EACCES);
}
return (0);
}
int
aes_mod_fini(void)
{
/* Unregister from KCF if module is registered */
if (aes_prov_handle != 0) {
if (crypto_unregister_provider(aes_prov_handle))
return (EBUSY);
aes_prov_handle = 0;
}
return (mod_remove(&modlinkage));
}
static int
aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx, int kmflag)
{
void *p = NULL;
boolean_t param_required = B_TRUE;
size_t param_len;
void *(*alloc_fun)(int);
int rv = CRYPTO_SUCCESS;
switch (mechanism->cm_type) {
case AES_ECB_MECH_INFO_TYPE:
param_required = B_FALSE;
alloc_fun = ecb_alloc_ctx;
break;
case AES_CBC_MECH_INFO_TYPE:
param_len = AES_BLOCK_LEN;
alloc_fun = cbc_alloc_ctx;
break;
case AES_CTR_MECH_INFO_TYPE:
param_len = sizeof (CK_AES_CTR_PARAMS);
alloc_fun = ctr_alloc_ctx;
break;
case AES_CCM_MECH_INFO_TYPE:
param_len = sizeof (CK_AES_CCM_PARAMS);
alloc_fun = ccm_alloc_ctx;
break;
case AES_GCM_MECH_INFO_TYPE:
param_len = sizeof (CK_AES_GCM_PARAMS);
alloc_fun = gcm_alloc_ctx;
break;
case AES_GMAC_MECH_INFO_TYPE:
param_len = sizeof (CK_AES_GMAC_PARAMS);
alloc_fun = gmac_alloc_ctx;
break;
default:
rv = CRYPTO_MECHANISM_INVALID;
return (rv);
}
if (param_required && mechanism->cm_param != NULL &&
mechanism->cm_param_len != param_len) {
rv = CRYPTO_MECHANISM_PARAM_INVALID;
}
if (ctx != NULL) {
p = (alloc_fun)(kmflag);
*ctx = p;
}
return (rv);
}
/*
* Initialize key schedules for AES
*/
static int
init_keysched(crypto_key_t *key, void *newbie)
{
/*
* Only keys by value are supported by this module.
*/
switch (key->ck_format) {
case CRYPTO_KEY_RAW:
if (key->ck_length < AES_MINBITS ||
key->ck_length > AES_MAXBITS) {
return (CRYPTO_KEY_SIZE_RANGE);
}
/* key length must be either 128, 192, or 256 */
if ((key->ck_length & 63) != 0)
return (CRYPTO_KEY_SIZE_RANGE);
break;
default:
return (CRYPTO_KEY_TYPE_INCONSISTENT);
}
aes_init_keysched(key->ck_data, key->ck_length, newbie);
return (CRYPTO_SUCCESS);
}
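/*
 * Illustrative sketch only (names are hypothetical): a raw key as accepted
 * by init_keysched(). ck_length is expressed in bits and must be 128, 192
 * or 256; anything else is rejected with CRYPTO_KEY_SIZE_RANGE, and any
 * format other than CRYPTO_KEY_RAW with CRYPTO_KEY_TYPE_INCONSISTENT.
 * Guarded by #if 0 since it is illustration, not functional code.
 */
#if 0
static void
example_fill_raw_key(crypto_key_t *key, uint8_t *key_bytes)
{
	key->ck_format = CRYPTO_KEY_RAW;
	key->ck_data = key_bytes;	/* e.g. 32 bytes of key material */
	key->ck_length = 256;		/* bits, not bytes */
}
#endif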
/*
* KCF software provider control entry points.
*/
/* ARGSUSED */
static void
aes_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
*status = CRYPTO_PROVIDER_READY;
}
static int
aes_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_spi_ctx_template_t template,
crypto_req_handle_t req)
{
return (aes_common_init(ctx, mechanism, key, template, req, B_TRUE));
}
static int
aes_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_spi_ctx_template_t template,
crypto_req_handle_t req)
{
return (aes_common_init(ctx, mechanism, key, template, req, B_FALSE));
}
/*
* KCF software provider encrypt entry points.
*/
static int
aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_spi_ctx_template_t template,
crypto_req_handle_t req, boolean_t is_encrypt_init)
{
aes_ctx_t *aes_ctx;
int rv;
int kmflag;
/*
* Only keys by value are supported by this module.
*/
if (key->ck_format != CRYPTO_KEY_RAW) {
return (CRYPTO_KEY_TYPE_INCONSISTENT);
}
kmflag = crypto_kmflag(req);
if ((rv = aes_check_mech_param(mechanism, &aes_ctx, kmflag))
!= CRYPTO_SUCCESS)
return (rv);
rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, kmflag,
is_encrypt_init);
if (rv != CRYPTO_SUCCESS) {
crypto_free_mode_ctx(aes_ctx);
return (rv);
}
ctx->cc_provider_private = aes_ctx;
return (CRYPTO_SUCCESS);
}
static void
aes_copy_block64(uint8_t *in, uint64_t *out)
{
if (IS_P2ALIGNED(in, sizeof (uint64_t))) {
/* LINTED: pointer alignment */
out[0] = *(uint64_t *)&in[0];
/* LINTED: pointer alignment */
out[1] = *(uint64_t *)&in[8];
} else {
uint8_t *iv8 = (uint8_t *)&out[0];
AES_COPY_BLOCK(in, iv8);
}
}
static int
aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
crypto_data_t *ciphertext, crypto_req_handle_t req)
{
int ret = CRYPTO_FAILED;
aes_ctx_t *aes_ctx;
size_t saved_length, saved_offset, length_needed;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
/*
* For block ciphers, plaintext must be a multiple of AES block size.
* This test is only valid for ciphers whose blocksize is a power of 2.
*/
if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
== 0) && (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
return (CRYPTO_DATA_LEN_RANGE);
ASSERT(ciphertext != NULL);
/*
* For the cases below we only need to return the length required
* to store the output, and we must not destroy the context.
*/
switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
case CCM_MODE:
length_needed = plaintext->cd_length + aes_ctx->ac_mac_len;
break;
case GCM_MODE:
length_needed = plaintext->cd_length + aes_ctx->ac_tag_len;
break;
case GMAC_MODE:
if (plaintext->cd_length != 0)
return (CRYPTO_ARGUMENTS_BAD);
length_needed = aes_ctx->ac_tag_len;
break;
default:
length_needed = plaintext->cd_length;
}
if (ciphertext->cd_length < length_needed) {
ciphertext->cd_length = length_needed;
return (CRYPTO_BUFFER_TOO_SMALL);
}
saved_length = ciphertext->cd_length;
saved_offset = ciphertext->cd_offset;
/*
* Do an update on the specified input data.
*/
ret = aes_encrypt_update(ctx, plaintext, ciphertext, req);
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
/*
* For CCM mode, aes_ccm_encrypt_final() will take care of any
* left-over unprocessed data, and compute the MAC
*/
if (aes_ctx->ac_flags & CCM_MODE) {
/*
* ccm_encrypt_final() will compute the MAC and append it to the
* existing ciphertext, so the remaining length value has to be
* adjusted accordingly.
*/
/* the order of the following two lines MUST NOT be reversed */
ciphertext->cd_offset = ciphertext->cd_length;
ciphertext->cd_length = saved_length - ciphertext->cd_length;
ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, ciphertext,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
if (plaintext != ciphertext) {
ciphertext->cd_length =
ciphertext->cd_offset - saved_offset;
}
ciphertext->cd_offset = saved_offset;
} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
/*
* gcm_encrypt_final() will compute the MAC and append it to the
* existing ciphertext, so the remaining length value has to be
* adjusted accordingly.
*/
/* the order of the following two lines MUST NOT be reversed */
ciphertext->cd_offset = ciphertext->cd_length;
ciphertext->cd_length = saved_length - ciphertext->cd_length;
ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, ciphertext,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
if (plaintext != ciphertext) {
ciphertext->cd_length =
ciphertext->cd_offset - saved_offset;
}
ciphertext->cd_offset = saved_offset;
}
ASSERT(aes_ctx->ac_remainder_len == 0);
(void) aes_free_context(ctx);
return (ret);
}
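/*
 * Illustrative sketch only (the wrapper name is hypothetical): the sizing
 * contract of aes_encrypt(). When the output buffer is too small the call
 * returns CRYPTO_BUFFER_TOO_SMALL, stores the required size in cd_length
 * and leaves the context intact, so the caller can resize and retry.
 * Guarded by #if 0 since it is illustration, not functional code.
 */
#if 0
static int
example_sized_encrypt(crypto_ctx_t *ctx, crypto_data_t *pt, crypto_data_t *ct)
{
	int rv;

	ct->cd_length = 0;	/* deliberately too small: query the size */
	rv = aes_encrypt(ctx, pt, ct, NULL);
	if (rv == CRYPTO_BUFFER_TOO_SMALL) {
		/*
		 * ct->cd_length now holds the plaintext length plus the
		 * CCM MAC or GCM/GMAC tag, depending on the mode. Point
		 * ct at a buffer of at least that size before retrying.
		 */
		rv = aes_encrypt(ctx, pt, ct, NULL);
	}
	return (rv);
}
#endif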
static int
aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
crypto_data_t *plaintext, crypto_req_handle_t req)
{
int ret = CRYPTO_FAILED;
aes_ctx_t *aes_ctx;
off_t saved_offset;
size_t saved_length, length_needed;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
/*
* For block ciphers, plaintext must be a multiple of AES block size.
* This test is only valid for ciphers whose blocksize is a power of 2.
*/
if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
== 0) && (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0) {
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
}
ASSERT(plaintext != NULL);
/*
* Return the length needed to store the output.
* Do not destroy the context when the plaintext buffer is too small.
*
* CCM: plaintext is MAC length bytes smaller than the ciphertext
* GCM: plaintext is tag length bytes smaller than the ciphertext
* GMAC: plaintext length must be zero
*/
switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
case CCM_MODE:
length_needed = aes_ctx->ac_processed_data_len;
break;
case GCM_MODE:
length_needed = ciphertext->cd_length - aes_ctx->ac_tag_len;
break;
case GMAC_MODE:
if (plaintext->cd_length != 0)
return (CRYPTO_ARGUMENTS_BAD);
length_needed = 0;
break;
default:
length_needed = ciphertext->cd_length;
}
if (plaintext->cd_length < length_needed) {
plaintext->cd_length = length_needed;
return (CRYPTO_BUFFER_TOO_SMALL);
}
saved_offset = plaintext->cd_offset;
saved_length = plaintext->cd_length;
/*
* Do an update on the specified input data.
*/
ret = aes_decrypt_update(ctx, ciphertext, plaintext, req);
if (ret != CRYPTO_SUCCESS) {
goto cleanup;
}
if (aes_ctx->ac_flags & CCM_MODE) {
ASSERT(aes_ctx->ac_processed_data_len == aes_ctx->ac_data_len);
ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
/* the order of the following two lines MUST NOT be reversed */
plaintext->cd_offset = plaintext->cd_length;
plaintext->cd_length = saved_length - plaintext->cd_length;
ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, plaintext,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
if (ret == CRYPTO_SUCCESS) {
if (plaintext != ciphertext) {
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
}
} else {
plaintext->cd_length = saved_length;
}
plaintext->cd_offset = saved_offset;
} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
/* the order of the following two lines MUST NOT be reversed */
plaintext->cd_offset = plaintext->cd_length;
plaintext->cd_length = saved_length - plaintext->cd_length;
ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, plaintext,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
if (ret == CRYPTO_SUCCESS) {
if (plaintext != ciphertext) {
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
}
} else {
plaintext->cd_length = saved_length;
}
plaintext->cd_offset = saved_offset;
}
ASSERT(aes_ctx->ac_remainder_len == 0);
cleanup:
(void) aes_free_context(ctx);
return (ret);
}
/* ARGSUSED */
static int
aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
crypto_data_t *ciphertext, crypto_req_handle_t req)
{
off_t saved_offset;
size_t saved_length, out_len;
int ret = CRYPTO_SUCCESS;
aes_ctx_t *aes_ctx;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
ASSERT(ciphertext != NULL);
/* compute number of bytes that will hold the ciphertext */
out_len = aes_ctx->ac_remainder_len;
out_len += plaintext->cd_length;
out_len &= ~(AES_BLOCK_LEN - 1);
/* return length needed to store the output */
if (ciphertext->cd_length < out_len) {
ciphertext->cd_length = out_len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
saved_offset = ciphertext->cd_offset;
saved_length = ciphertext->cd_length;
/*
* Do the AES update on the specified input data.
*/
switch (plaintext->cd_format) {
case CRYPTO_DATA_RAW:
ret = crypto_update_iov(ctx->cc_provider_private,
plaintext, ciphertext, aes_encrypt_contiguous_blocks,
aes_copy_block64);
break;
case CRYPTO_DATA_UIO:
ret = crypto_update_uio(ctx->cc_provider_private,
plaintext, ciphertext, aes_encrypt_contiguous_blocks,
aes_copy_block64);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
/*
* Since AES counter mode is a stream cipher, we call
* ctr_mode_final() to pick up any remaining bytes.
* It is an internal function that does not destroy
* the context like *normal* final routines.
*/
if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
ret = ctr_mode_final((ctr_ctx_t *)aes_ctx,
ciphertext, aes_encrypt_block);
}
if (ret == CRYPTO_SUCCESS) {
if (plaintext != ciphertext)
ciphertext->cd_length =
ciphertext->cd_offset - saved_offset;
} else {
ciphertext->cd_length = saved_length;
}
ciphertext->cd_offset = saved_offset;
return (ret);
}
static int
aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
crypto_data_t *plaintext, crypto_req_handle_t req)
{
off_t saved_offset;
size_t saved_length, out_len;
int ret = CRYPTO_SUCCESS;
aes_ctx_t *aes_ctx;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
ASSERT(plaintext != NULL);
/*
* Compute number of bytes that will hold the plaintext.
* This is not necessary for CCM, GCM, and GMAC since these
* mechanisms never return plaintext for update operations.
*/
if ((aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
out_len = aes_ctx->ac_remainder_len;
out_len += ciphertext->cd_length;
out_len &= ~(AES_BLOCK_LEN - 1);
/* return length needed to store the output */
if (plaintext->cd_length < out_len) {
plaintext->cd_length = out_len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
}
saved_offset = plaintext->cd_offset;
saved_length = plaintext->cd_length;
if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE))
gcm_set_kmflag((gcm_ctx_t *)aes_ctx, crypto_kmflag(req));
/*
* Do the AES update on the specified input data.
*/
switch (ciphertext->cd_format) {
case CRYPTO_DATA_RAW:
ret = crypto_update_iov(ctx->cc_provider_private,
ciphertext, plaintext, aes_decrypt_contiguous_blocks,
aes_copy_block64);
break;
case CRYPTO_DATA_UIO:
ret = crypto_update_uio(ctx->cc_provider_private,
ciphertext, plaintext, aes_decrypt_contiguous_blocks,
aes_copy_block64);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
/*
* Since AES counter mode is a stream cipher, we call
* ctr_mode_final() to pick up any remaining bytes.
* It is an internal function that does not destroy
* the context like *normal* final routines.
*/
if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, plaintext,
aes_encrypt_block);
if (ret == CRYPTO_DATA_LEN_RANGE)
ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
}
if (ret == CRYPTO_SUCCESS) {
if (ciphertext != plaintext)
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
} else {
plaintext->cd_length = saved_length;
}
plaintext->cd_offset = saved_offset;
return (ret);
}
/* ARGSUSED */
static int
aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
crypto_req_handle_t req)
{
aes_ctx_t *aes_ctx;
int ret;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
if (data->cd_format != CRYPTO_DATA_RAW &&
data->cd_format != CRYPTO_DATA_UIO) {
return (CRYPTO_ARGUMENTS_BAD);
}
if (aes_ctx->ac_flags & CTR_MODE) {
if (aes_ctx->ac_remainder_len > 0) {
ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
aes_encrypt_block);
if (ret != CRYPTO_SUCCESS)
return (ret);
}
} else if (aes_ctx->ac_flags & CCM_MODE) {
ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, data,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
size_t saved_offset = data->cd_offset;
ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, data,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
data->cd_length = data->cd_offset - saved_offset;
data->cd_offset = saved_offset;
} else {
/*
* There must be no unprocessed plaintext; some is left over
* whenever the length of the last data is not a multiple of
* the AES block length.
*/
if (aes_ctx->ac_remainder_len > 0) {
return (CRYPTO_DATA_LEN_RANGE);
}
data->cd_length = 0;
}
(void) aes_free_context(ctx);
return (CRYPTO_SUCCESS);
}
/* ARGSUSED */
static int
aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
crypto_req_handle_t req)
{
aes_ctx_t *aes_ctx;
int ret;
off_t saved_offset;
size_t saved_length;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
if (data->cd_format != CRYPTO_DATA_RAW &&
data->cd_format != CRYPTO_DATA_UIO) {
return (CRYPTO_ARGUMENTS_BAD);
}
/*
* There must be no unprocessed ciphertext; some is left over
* whenever the length of the last ciphertext is not a multiple
* of the AES block length.
*/
if (aes_ctx->ac_remainder_len > 0) {
if ((aes_ctx->ac_flags & CTR_MODE) == 0)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
else {
ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
aes_encrypt_block);
if (ret == CRYPTO_DATA_LEN_RANGE)
ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
if (ret != CRYPTO_SUCCESS)
return (ret);
}
}
if (aes_ctx->ac_flags & CCM_MODE) {
/*
* This is where all the plaintext is returned; make sure the
* plaintext buffer is big enough.
*/
size_t pt_len = aes_ctx->ac_data_len;
if (data->cd_length < pt_len) {
data->cd_length = pt_len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
ASSERT(aes_ctx->ac_processed_data_len == pt_len);
ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
saved_offset = data->cd_offset;
saved_length = data->cd_length;
ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, data,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
if (ret == CRYPTO_SUCCESS) {
data->cd_length = data->cd_offset - saved_offset;
} else {
data->cd_length = saved_length;
}
data->cd_offset = saved_offset;
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
/*
* This is where all the plaintext is returned; make sure the
* plaintext buffer is big enough.
*/
gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx;
size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
if (data->cd_length < pt_len) {
data->cd_length = pt_len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
saved_offset = data->cd_offset;
saved_length = data->cd_length;
ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, data,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
if (ret == CRYPTO_SUCCESS) {
data->cd_length = data->cd_offset - saved_offset;
} else {
data->cd_length = saved_length;
}
data->cd_offset = saved_offset;
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
}
if ((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
data->cd_length = 0;
}
(void) aes_free_context(ctx);
return (CRYPTO_SUCCESS);
}
/* ARGSUSED */
static int
aes_encrypt_atomic(crypto_provider_handle_t provider,
crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
aes_ctx_t aes_ctx; /* on the stack */
off_t saved_offset;
size_t saved_length;
size_t length_needed;
int ret;
ASSERT(ciphertext != NULL);
/*
* CTR, CCM, GCM, and GMAC modes do not require the plaintext
* length to be a multiple of the AES block size.
*/
switch (mechanism->cm_type) {
case AES_CTR_MECH_INFO_TYPE:
case AES_CCM_MECH_INFO_TYPE:
case AES_GCM_MECH_INFO_TYPE:
case AES_GMAC_MECH_INFO_TYPE:
break;
default:
if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
return (CRYPTO_DATA_LEN_RANGE);
}
if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS)
return (ret);
bzero(&aes_ctx, sizeof (aes_ctx_t));
ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
crypto_kmflag(req), B_TRUE);
if (ret != CRYPTO_SUCCESS)
return (ret);
switch (mechanism->cm_type) {
case AES_CCM_MECH_INFO_TYPE:
length_needed = plaintext->cd_length + aes_ctx.ac_mac_len;
break;
case AES_GMAC_MECH_INFO_TYPE:
if (plaintext->cd_length != 0)
return (CRYPTO_ARGUMENTS_BAD);
- /* FALLTHRU */
+ /* FALLTHROUGH */
case AES_GCM_MECH_INFO_TYPE:
length_needed = plaintext->cd_length + aes_ctx.ac_tag_len;
break;
default:
length_needed = plaintext->cd_length;
}
/* return size of buffer needed to store output */
if (ciphertext->cd_length < length_needed) {
ciphertext->cd_length = length_needed;
ret = CRYPTO_BUFFER_TOO_SMALL;
goto out;
}
saved_offset = ciphertext->cd_offset;
saved_length = ciphertext->cd_length;
/*
* Do an update on the specified input data.
*/
switch (plaintext->cd_format) {
case CRYPTO_DATA_RAW:
ret = crypto_update_iov(&aes_ctx, plaintext, ciphertext,
aes_encrypt_contiguous_blocks, aes_copy_block64);
break;
case CRYPTO_DATA_UIO:
ret = crypto_update_uio(&aes_ctx, plaintext, ciphertext,
aes_encrypt_contiguous_blocks, aes_copy_block64);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
if (ret == CRYPTO_SUCCESS) {
if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
ret = ccm_encrypt_final((ccm_ctx_t *)&aes_ctx,
ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
ASSERT(aes_ctx.ac_remainder_len == 0);
} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
ASSERT(aes_ctx.ac_remainder_len == 0);
} else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
if (aes_ctx.ac_remainder_len > 0) {
ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
ciphertext, aes_encrypt_block);
if (ret != CRYPTO_SUCCESS)
goto out;
}
} else {
ASSERT(aes_ctx.ac_remainder_len == 0);
}
if (plaintext != ciphertext) {
ciphertext->cd_length =
ciphertext->cd_offset - saved_offset;
}
} else {
ciphertext->cd_length = saved_length;
}
ciphertext->cd_offset = saved_offset;
out:
if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
}
#ifdef CAN_USE_GCM_ASM
if (aes_ctx.ac_flags & (GCM_MODE|GMAC_MODE) &&
((gcm_ctx_t *)&aes_ctx)->gcm_Htable != NULL) {
gcm_ctx_t *ctx = (gcm_ctx_t *)&aes_ctx;
bzero(ctx->gcm_Htable, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
}
#endif
return (ret);
}
/* ARGSUSED */
static int
aes_decrypt_atomic(crypto_provider_handle_t provider,
crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
aes_ctx_t aes_ctx; /* on the stack */
off_t saved_offset;
size_t saved_length;
size_t length_needed;
int ret;
ASSERT(plaintext != NULL);
/*
* CCM, GCM, CTR, and GMAC modes do not require the ciphertext
* length to be a multiple of the AES block size.
*/
switch (mechanism->cm_type) {
case AES_CTR_MECH_INFO_TYPE:
case AES_CCM_MECH_INFO_TYPE:
case AES_GCM_MECH_INFO_TYPE:
case AES_GMAC_MECH_INFO_TYPE:
break;
default:
if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
}
if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS)
return (ret);
bzero(&aes_ctx, sizeof (aes_ctx_t));
ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
crypto_kmflag(req), B_FALSE);
if (ret != CRYPTO_SUCCESS)
return (ret);
switch (mechanism->cm_type) {
case AES_CCM_MECH_INFO_TYPE:
length_needed = aes_ctx.ac_data_len;
break;
case AES_GCM_MECH_INFO_TYPE:
length_needed = ciphertext->cd_length - aes_ctx.ac_tag_len;
break;
case AES_GMAC_MECH_INFO_TYPE:
if (plaintext->cd_length != 0)
return (CRYPTO_ARGUMENTS_BAD);
length_needed = 0;
break;
default:
length_needed = ciphertext->cd_length;
}
/* return size of buffer needed to store output */
if (plaintext->cd_length < length_needed) {
plaintext->cd_length = length_needed;
ret = CRYPTO_BUFFER_TOO_SMALL;
goto out;
}
saved_offset = plaintext->cd_offset;
saved_length = plaintext->cd_length;
if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE)
gcm_set_kmflag((gcm_ctx_t *)&aes_ctx, crypto_kmflag(req));
/*
* Do an update on the specified input data.
*/
switch (ciphertext->cd_format) {
case CRYPTO_DATA_RAW:
ret = crypto_update_iov(&aes_ctx, ciphertext, plaintext,
aes_decrypt_contiguous_blocks, aes_copy_block64);
break;
case CRYPTO_DATA_UIO:
ret = crypto_update_uio(&aes_ctx, ciphertext, plaintext,
aes_decrypt_contiguous_blocks, aes_copy_block64);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
if (ret == CRYPTO_SUCCESS) {
if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
ASSERT(aes_ctx.ac_processed_data_len
== aes_ctx.ac_data_len);
ASSERT(aes_ctx.ac_processed_mac_len
== aes_ctx.ac_mac_len);
ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
ASSERT(aes_ctx.ac_remainder_len == 0);
if ((ret == CRYPTO_SUCCESS) &&
(ciphertext != plaintext)) {
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
} else {
plaintext->cd_length = saved_length;
}
} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
aes_xor_block);
ASSERT(aes_ctx.ac_remainder_len == 0);
if ((ret == CRYPTO_SUCCESS) &&
(ciphertext != plaintext)) {
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
} else {
plaintext->cd_length = saved_length;
}
} else if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
ASSERT(aes_ctx.ac_remainder_len == 0);
if (ciphertext != plaintext)
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
} else {
if (aes_ctx.ac_remainder_len > 0) {
ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
plaintext, aes_encrypt_block);
if (ret == CRYPTO_DATA_LEN_RANGE)
ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
if (ret != CRYPTO_SUCCESS)
goto out;
}
if (ciphertext != plaintext)
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
}
} else {
plaintext->cd_length = saved_length;
}
plaintext->cd_offset = saved_offset;
out:
if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
}
if (aes_ctx.ac_flags & CCM_MODE) {
if (aes_ctx.ac_pt_buf != NULL) {
vmem_free(aes_ctx.ac_pt_buf, aes_ctx.ac_data_len);
}
} else if (aes_ctx.ac_flags & (GCM_MODE|GMAC_MODE)) {
if (((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf != NULL) {
vmem_free(((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf,
((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf_len);
}
#ifdef CAN_USE_GCM_ASM
if (((gcm_ctx_t *)&aes_ctx)->gcm_Htable != NULL) {
gcm_ctx_t *ctx = (gcm_ctx_t *)&aes_ctx;
bzero(ctx->gcm_Htable, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
}
#endif
}
return (ret);
}
/*
* KCF software provider context template entry points.
*/
/* ARGSUSED */
static int
aes_create_ctx_template(crypto_provider_handle_t provider,
crypto_mechanism_t *mechanism, crypto_key_t *key,
crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size, crypto_req_handle_t req)
{
void *keysched;
size_t size;
int rv;
if (mechanism->cm_type != AES_ECB_MECH_INFO_TYPE &&
mechanism->cm_type != AES_CBC_MECH_INFO_TYPE &&
mechanism->cm_type != AES_CTR_MECH_INFO_TYPE &&
mechanism->cm_type != AES_CCM_MECH_INFO_TYPE &&
mechanism->cm_type != AES_GCM_MECH_INFO_TYPE &&
mechanism->cm_type != AES_GMAC_MECH_INFO_TYPE)
return (CRYPTO_MECHANISM_INVALID);
if ((keysched = aes_alloc_keysched(&size,
crypto_kmflag(req))) == NULL) {
return (CRYPTO_HOST_MEMORY);
}
/*
* Initialize key schedule. Key length information is stored
* in the key.
*/
if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
bzero(keysched, size);
kmem_free(keysched, size);
return (rv);
}
*tmpl = keysched;
*tmpl_size = size;
return (CRYPTO_SUCCESS);
}
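/*
 * Illustrative sketch only (the wrapper name is hypothetical): amortizing
 * the key schedule setup by creating a context template once and reusing
 * it for any number of atomic operations. The template is just the expanded
 * key schedule, so it is zeroed before being freed. Guarded by #if 0 since
 * it is illustration, not functional code.
 */
#if 0
static int
example_atomic_with_template(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_data_t *pt, crypto_data_t *ct)
{
	crypto_spi_ctx_template_t tmpl = NULL;
	size_t tmpl_size = 0;
	int rv;

	/* Expand the key schedule once ... */
	rv = aes_create_ctx_template(NULL, mech, key, &tmpl, &tmpl_size, NULL);
	if (rv != CRYPTO_SUCCESS)
		return (rv);

	/* ... and reuse it for any number of atomic operations. */
	rv = aes_encrypt_atomic(NULL, 0, mech, key, pt, ct, tmpl, NULL);

	bzero(tmpl, tmpl_size);
	kmem_free(tmpl, tmpl_size);
	return (rv);
}
#endif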
static int
aes_free_context(crypto_ctx_t *ctx)
{
aes_ctx_t *aes_ctx = ctx->cc_provider_private;
if (aes_ctx != NULL) {
if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
ASSERT(aes_ctx->ac_keysched_len != 0);
bzero(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len);
kmem_free(aes_ctx->ac_keysched,
aes_ctx->ac_keysched_len);
}
crypto_free_mode_ctx(aes_ctx);
ctx->cc_provider_private = NULL;
}
return (CRYPTO_SUCCESS);
}
static int
aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag,
boolean_t is_encrypt_init)
{
int rv = CRYPTO_SUCCESS;
void *keysched;
size_t size = 0;
if (template == NULL) {
if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
return (CRYPTO_HOST_MEMORY);
/*
* Initialize key schedule.
* Key length is stored in the key.
*/
if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
kmem_free(keysched, size);
return (rv);
}
aes_ctx->ac_flags |= PROVIDER_OWNS_KEY_SCHEDULE;
aes_ctx->ac_keysched_len = size;
} else {
keysched = template;
}
aes_ctx->ac_keysched = keysched;
switch (mechanism->cm_type) {
case AES_CBC_MECH_INFO_TYPE:
rv = cbc_init_ctx((cbc_ctx_t *)aes_ctx, mechanism->cm_param,
mechanism->cm_param_len, AES_BLOCK_LEN, aes_copy_block64);
break;
case AES_CTR_MECH_INFO_TYPE: {
CK_AES_CTR_PARAMS *pp;
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (CK_AES_CTR_PARAMS)) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
pp = (CK_AES_CTR_PARAMS *)(void *)mechanism->cm_param;
rv = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
pp->cb, aes_copy_block);
break;
}
case AES_CCM_MECH_INFO_TYPE:
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (CK_AES_CCM_PARAMS)) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
rv = ccm_init_ctx((ccm_ctx_t *)aes_ctx, mechanism->cm_param,
kmflag, is_encrypt_init, AES_BLOCK_LEN, aes_encrypt_block,
aes_xor_block);
break;
case AES_GCM_MECH_INFO_TYPE:
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
rv = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
break;
case AES_GMAC_MECH_INFO_TYPE:
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (CK_AES_GMAC_PARAMS)) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
rv = gmac_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
break;
case AES_ECB_MECH_INFO_TYPE:
aes_ctx->ac_flags |= ECB_MODE;
}
if (rv != CRYPTO_SUCCESS) {
if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
bzero(keysched, size);
kmem_free(keysched, size);
}
}
return (rv);
}
static int
process_gmac_mech(crypto_mechanism_t *mech, crypto_data_t *data,
CK_AES_GCM_PARAMS *gcm_params)
{
/* LINTED: pointer alignment */
CK_AES_GMAC_PARAMS *params = (CK_AES_GMAC_PARAMS *)mech->cm_param;
if (mech->cm_type != AES_GMAC_MECH_INFO_TYPE)
return (CRYPTO_MECHANISM_INVALID);
if (mech->cm_param_len != sizeof (CK_AES_GMAC_PARAMS))
return (CRYPTO_MECHANISM_PARAM_INVALID);
if (params->pIv == NULL)
return (CRYPTO_MECHANISM_PARAM_INVALID);
gcm_params->pIv = params->pIv;
gcm_params->ulIvLen = AES_GMAC_IV_LEN;
gcm_params->ulTagBits = AES_GMAC_TAG_BITS;
if (data == NULL)
return (CRYPTO_SUCCESS);
if (data->cd_format != CRYPTO_DATA_RAW)
return (CRYPTO_ARGUMENTS_BAD);
gcm_params->pAAD = (uchar_t *)data->cd_raw.iov_base;
gcm_params->ulAADLen = data->cd_length;
return (CRYPTO_SUCCESS);
}
static int
aes_mac_atomic(crypto_provider_handle_t provider,
crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
CK_AES_GCM_PARAMS gcm_params;
crypto_mechanism_t gcm_mech;
int rv;
if ((rv = process_gmac_mech(mechanism, data, &gcm_params))
!= CRYPTO_SUCCESS)
return (rv);
gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
gcm_mech.cm_param = (char *)&gcm_params;
return (aes_encrypt_atomic(provider, session_id, &gcm_mech,
key, &null_crypto_data, mac, template, req));
}
static int
aes_mac_verify_atomic(crypto_provider_handle_t provider,
crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
CK_AES_GCM_PARAMS gcm_params;
crypto_mechanism_t gcm_mech;
int rv;
if ((rv = process_gmac_mech(mechanism, data, &gcm_params))
!= CRYPTO_SUCCESS)
return (rv);
gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
gcm_mech.cm_param = (char *)&gcm_params;
return (aes_decrypt_atomic(provider, session_id, &gcm_mech,
key, mac, &null_crypto_data, template, req));
}
diff --git a/sys/contrib/openzfs/module/icp/io/skein_mod.c b/sys/contrib/openzfs/module/icp/io/skein_mod.c
index 5ee36af12bcb..ac7d201eb708 100644
--- a/sys/contrib/openzfs/module/icp/io/skein_mod.c
+++ b/sys/contrib/openzfs/module/icp/io/skein_mod.c
@@ -1,729 +1,728 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
#include <sys/modctl.h>
#include <sys/crypto/common.h>
#include <sys/crypto/icp.h>
#include <sys/crypto/spi.h>
#include <sys/sysmacros.h>
#define SKEIN_MODULE_IMPL
#include <sys/skein.h>
/*
* Like the sha2 module, we create the skein module with two modlinkages:
* - modlmisc to allow direct calls to Skein_* API functions.
* - modlcrypto to integrate well into the Kernel Crypto Framework (KCF).
*/
static struct modlmisc modlmisc = {
&mod_cryptoops,
"Skein Message-Digest Algorithm"
};
static struct modlcrypto modlcrypto = {
&mod_cryptoops,
"Skein Kernel SW Provider"
};
static struct modlinkage modlinkage = {
MODREV_1, {&modlmisc, &modlcrypto, NULL}
};
static crypto_mech_info_t skein_mech_info_tab[] = {
{CKM_SKEIN_256, SKEIN_256_MECH_INFO_TYPE,
CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
{CKM_SKEIN_256_MAC, SKEIN_256_MAC_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC, 1, INT_MAX,
CRYPTO_KEYSIZE_UNIT_IN_BYTES},
{CKM_SKEIN_512, SKEIN_512_MECH_INFO_TYPE,
CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
{CKM_SKEIN_512_MAC, SKEIN_512_MAC_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC, 1, INT_MAX,
CRYPTO_KEYSIZE_UNIT_IN_BYTES},
{CKM_SKEIN1024, SKEIN1024_MECH_INFO_TYPE,
CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
{CKM_SKEIN1024_MAC, SKEIN1024_MAC_MECH_INFO_TYPE,
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC, 1, INT_MAX,
CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
static void skein_provider_status(crypto_provider_handle_t, uint_t *);
static crypto_control_ops_t skein_control_ops = {
skein_provider_status
};
static int skein_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
crypto_req_handle_t);
static int skein_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
crypto_req_handle_t);
static int skein_update(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
static int skein_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
static int skein_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
crypto_req_handle_t);
static crypto_digest_ops_t skein_digest_ops = {
.digest_init = skein_digest_init,
.digest = skein_digest,
.digest_update = skein_update,
.digest_key = NULL,
.digest_final = skein_final,
.digest_atomic = skein_digest_atomic
};
static int skein_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
crypto_spi_ctx_template_t, crypto_req_handle_t);
static int skein_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
crypto_spi_ctx_template_t, crypto_req_handle_t);
static crypto_mac_ops_t skein_mac_ops = {
.mac_init = skein_mac_init,
.mac = NULL,
.mac_update = skein_update, /* using regular digest update is OK here */
.mac_final = skein_final, /* using regular digest final is OK here */
.mac_atomic = skein_mac_atomic,
.mac_verify_atomic = NULL
};
static int skein_create_ctx_template(crypto_provider_handle_t,
crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
size_t *, crypto_req_handle_t);
static int skein_free_context(crypto_ctx_t *);
static crypto_ctx_ops_t skein_ctx_ops = {
.create_ctx_template = skein_create_ctx_template,
.free_context = skein_free_context
};
static crypto_ops_t skein_crypto_ops = {{{{{
&skein_control_ops,
&skein_digest_ops,
NULL,
&skein_mac_ops,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
&skein_ctx_ops,
}}}}};
static crypto_provider_info_t skein_prov_info = {{{{
CRYPTO_SPI_VERSION_1,
"Skein Software Provider",
CRYPTO_SW_PROVIDER,
NULL,
&skein_crypto_ops,
sizeof (skein_mech_info_tab) / sizeof (crypto_mech_info_t),
skein_mech_info_tab
}}}};
static crypto_kcf_provider_handle_t skein_prov_handle = 0;
typedef struct skein_ctx {
skein_mech_type_t sc_mech_type;
size_t sc_digest_bitlen;
/*LINTED(E_ANONYMOUS_UNION_DECL)*/
union {
Skein_256_Ctxt_t sc_256;
Skein_512_Ctxt_t sc_512;
Skein1024_Ctxt_t sc_1024;
};
} skein_ctx_t;
#define SKEIN_CTX(_ctx_) ((skein_ctx_t *)((_ctx_)->cc_provider_private))
#define SKEIN_CTX_LVALUE(_ctx_) (_ctx_)->cc_provider_private
#define SKEIN_OP(_skein_ctx, _op, ...) \
do { \
skein_ctx_t *sc = (_skein_ctx); \
switch (sc->sc_mech_type) { \
case SKEIN_256_MECH_INFO_TYPE: \
case SKEIN_256_MAC_MECH_INFO_TYPE: \
(void) Skein_256_ ## _op(&sc->sc_256, __VA_ARGS__);\
break; \
case SKEIN_512_MECH_INFO_TYPE: \
case SKEIN_512_MAC_MECH_INFO_TYPE: \
(void) Skein_512_ ## _op(&sc->sc_512, __VA_ARGS__);\
break; \
case SKEIN1024_MECH_INFO_TYPE: \
case SKEIN1024_MAC_MECH_INFO_TYPE: \
(void) Skein1024_ ## _op(&sc->sc_1024, __VA_ARGS__);\
break; \
} \
- _NOTE(CONSTCOND) \
} while (0)
static int
skein_get_digest_bitlen(const crypto_mechanism_t *mechanism, size_t *result)
{
if (mechanism->cm_param != NULL) {
/*LINTED(E_BAD_PTR_CAST_ALIGN)*/
skein_param_t *param = (skein_param_t *)mechanism->cm_param;
if (mechanism->cm_param_len != sizeof (*param) ||
param->sp_digest_bitlen == 0) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
*result = param->sp_digest_bitlen;
} else {
switch (mechanism->cm_type) {
case SKEIN_256_MECH_INFO_TYPE:
*result = 256;
break;
case SKEIN_512_MECH_INFO_TYPE:
*result = 512;
break;
case SKEIN1024_MECH_INFO_TYPE:
*result = 1024;
break;
default:
return (CRYPTO_MECHANISM_INVALID);
}
}
return (CRYPTO_SUCCESS);
}
int
skein_mod_init(void)
{
int error;
if ((error = mod_install(&modlinkage)) != 0)
return (error);
/*
* Try to register with KCF - failure shouldn't unload us, since we
* still may want to continue providing misc/skein functionality.
*/
(void) crypto_register_provider(&skein_prov_info, &skein_prov_handle);
return (0);
}
int
skein_mod_fini(void)
{
int ret;
if (skein_prov_handle != 0) {
if ((ret = crypto_unregister_provider(skein_prov_handle)) !=
CRYPTO_SUCCESS) {
cmn_err(CE_WARN,
"skein _fini: crypto_unregister_provider() "
"failed (0x%x)", ret);
return (EBUSY);
}
skein_prov_handle = 0;
}
return (mod_remove(&modlinkage));
}
/*
* KCF software provider control entry points.
*/
/* ARGSUSED */
static void
skein_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
*status = CRYPTO_PROVIDER_READY;
}
/*
* General Skein hashing helper functions.
*/
/*
* Performs an Update on a context with uio input data.
*/
static int
skein_digest_update_uio(skein_ctx_t *ctx, const crypto_data_t *data)
{
off_t offset = data->cd_offset;
size_t length = data->cd_length;
uint_t vec_idx = 0;
size_t cur_len;
zfs_uio_t *uio = data->cd_uio;
/* we support only kernel buffer */
if (zfs_uio_segflg(uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD);
/*
* Jump to the first iovec containing data to be
* digested.
*/
offset = zfs_uio_index_at_offset(uio, offset, &vec_idx);
if (vec_idx == zfs_uio_iovcnt(uio)) {
/*
* The caller specified an offset that is larger than the
* total size of the buffers it provided.
*/
return (CRYPTO_DATA_LEN_RANGE);
}
/*
* Now do the digesting on the iovecs.
*/
while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) {
cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset, length);
SKEIN_OP(ctx, Update, (uint8_t *)zfs_uio_iovbase(uio, vec_idx)
+ offset, cur_len);
length -= cur_len;
vec_idx++;
offset = 0;
}
if (vec_idx == zfs_uio_iovcnt(uio) && length > 0) {
/*
* The end of the specified iovecs was reached but the
* requested length could not be processed, i.e. the caller
* requested to digest more data than it provided.
*/
return (CRYPTO_DATA_LEN_RANGE);
}
return (CRYPTO_SUCCESS);
}
/*
* Performs a Final on a context and writes to a uio digest output.
*/
static int
skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest,
crypto_req_handle_t req)
{
off_t offset = digest->cd_offset;
uint_t vec_idx = 0;
zfs_uio_t *uio = digest->cd_uio;
/* we support only kernel buffer */
if (zfs_uio_segflg(uio) != UIO_SYSSPACE)
return (CRYPTO_ARGUMENTS_BAD);
/*
* Jump to the first iovec containing a pointer to the digest to be returned.
*/
offset = zfs_uio_index_at_offset(uio, offset, &vec_idx);
if (vec_idx == zfs_uio_iovcnt(uio)) {
/*
* The caller specified an offset that is larger than the
* total size of the buffers it provided.
*/
return (CRYPTO_DATA_LEN_RANGE);
}
if (offset + CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen) <=
zfs_uio_iovlen(uio, vec_idx)) {
/* The computed digest will fit in the current iovec. */
SKEIN_OP(ctx, Final,
(uchar_t *)zfs_uio_iovbase(uio, vec_idx) + offset);
} else {
uint8_t *digest_tmp;
off_t scratch_offset = 0;
size_t length = CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen);
size_t cur_len;
digest_tmp = kmem_alloc(CRYPTO_BITS2BYTES(
ctx->sc_digest_bitlen), crypto_kmflag(req));
if (digest_tmp == NULL)
return (CRYPTO_HOST_MEMORY);
SKEIN_OP(ctx, Final, digest_tmp);
while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) {
cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset,
length);
bcopy(digest_tmp + scratch_offset,
zfs_uio_iovbase(uio, vec_idx) + offset, cur_len);
length -= cur_len;
vec_idx++;
scratch_offset += cur_len;
offset = 0;
}
kmem_free(digest_tmp, CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen));
if (vec_idx == zfs_uio_iovcnt(uio) && length > 0) {
/*
* The end of the specified iovecs was reached, but the
* requested length could not be processed; i.e., the
* caller asked to digest more data than it provided.
*/
return (CRYPTO_DATA_LEN_RANGE);
}
}
return (CRYPTO_SUCCESS);
}
/*
* KCF software provider digest entry points.
*/
/*
* Initializes a skein digest context to the configuration in `mechanism'.
* The mechanism cm_type must be one of SKEIN_*_MECH_INFO_TYPE. The cm_param
* field may contain a skein_param_t structure indicating the length of the
* digest the algorithm should produce. Otherwise the default output lengths
* are applied (32 bytes for Skein-256, 64 bytes for Skein-512 and 128 bytes
* for Skein-1024).
*/
static int
skein_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_req_handle_t req)
{
int error = CRYPTO_SUCCESS;
if (!VALID_SKEIN_DIGEST_MECH(mechanism->cm_type))
return (CRYPTO_MECHANISM_INVALID);
SKEIN_CTX_LVALUE(ctx) = kmem_alloc(sizeof (*SKEIN_CTX(ctx)),
crypto_kmflag(req));
if (SKEIN_CTX(ctx) == NULL)
return (CRYPTO_HOST_MEMORY);
SKEIN_CTX(ctx)->sc_mech_type = mechanism->cm_type;
error = skein_get_digest_bitlen(mechanism,
&SKEIN_CTX(ctx)->sc_digest_bitlen);
if (error != CRYPTO_SUCCESS)
goto errout;
SKEIN_OP(SKEIN_CTX(ctx), Init, SKEIN_CTX(ctx)->sc_digest_bitlen);
return (CRYPTO_SUCCESS);
errout:
bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
return (error);
}
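/*
 * Minimal caller-side sketch of how a non-default digest length reaches
 * skein_digest_init(): a skein_param_t hung off cm_param selects the output
 * size.  The struct definitions below are simplified stand-ins mirroring
 * sys/skein.h and sys/crypto/common.h, and the cm_type value is a placeholder;
 * this is illustrative only, not the kernel headers or a real KCF call.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct skein_param { size_t sp_digest_bitlen; } skein_param_t;
typedef struct crypto_mechanism {
	int	cm_type;	/* e.g. a SKEIN_*_MECH_INFO_TYPE value */
	char	*cm_param;	/* optional skein_param_t */
	size_t	cm_param_len;
} crypto_mechanism_t;

int
main(void)
{
	skein_param_t sp = { .sp_digest_bitlen = 256 };	/* 32-byte output */
	crypto_mechanism_t mech = {
		.cm_type = 1,			/* placeholder mechanism */
		.cm_param = (char *)&sp,
		.cm_param_len = sizeof (sp)
	};

	/* skein_get_digest_bitlen() would read sp_digest_bitlen from here. */
	skein_param_t *p = (skein_param_t *)mech.cm_param;
	printf("requested digest: %zu bits (%zu bytes)\n",
	    p->sp_digest_bitlen, p->sp_digest_bitlen / 8);
	return (0);
}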
/*
* Executes a skein_update and a skein_final on a pre-initialized crypto
* context in a single step. See the documentation for those functions for
* what to pass here.
*/
static int
skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
crypto_req_handle_t req)
{
int error = CRYPTO_SUCCESS;
ASSERT(SKEIN_CTX(ctx) != NULL);
if (digest->cd_length <
CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen)) {
digest->cd_length =
CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen);
return (CRYPTO_BUFFER_TOO_SMALL);
}
error = skein_update(ctx, data, req);
if (error != CRYPTO_SUCCESS) {
bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
digest->cd_length = 0;
return (error);
}
error = skein_final(ctx, digest, req);
return (error);
}
/*
* Performs a skein Update with the input message in `data' (successive calls
* can push more data). This is used both for digest and MAC operation.
* Supported input data formats are raw and uio.
*/
/*ARGSUSED*/
static int
skein_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
{
int error = CRYPTO_SUCCESS;
ASSERT(SKEIN_CTX(ctx) != NULL);
switch (data->cd_format) {
case CRYPTO_DATA_RAW:
SKEIN_OP(SKEIN_CTX(ctx), Update,
(uint8_t *)data->cd_raw.iov_base + data->cd_offset,
data->cd_length);
break;
case CRYPTO_DATA_UIO:
error = skein_digest_update_uio(SKEIN_CTX(ctx), data);
break;
default:
error = CRYPTO_ARGUMENTS_BAD;
}
return (error);
}
/*
* Performs a skein Final, writing the output to `digest'. This is used both
* for digest and MAC operation.
* Supported output digest formats are raw and uio.
*/
/*ARGSUSED*/
static int
skein_final(crypto_ctx_t *ctx, crypto_data_t *digest, crypto_req_handle_t req)
{
int error = CRYPTO_SUCCESS;
ASSERT(SKEIN_CTX(ctx) != NULL);
if (digest->cd_length <
CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen)) {
digest->cd_length =
CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen);
return (CRYPTO_BUFFER_TOO_SMALL);
}
switch (digest->cd_format) {
case CRYPTO_DATA_RAW:
SKEIN_OP(SKEIN_CTX(ctx), Final,
(uint8_t *)digest->cd_raw.iov_base + digest->cd_offset);
break;
case CRYPTO_DATA_UIO:
error = skein_digest_final_uio(SKEIN_CTX(ctx), digest, req);
break;
default:
error = CRYPTO_ARGUMENTS_BAD;
}
if (error == CRYPTO_SUCCESS)
digest->cd_length =
CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen);
else
digest->cd_length = 0;
bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*(SKEIN_CTX(ctx))));
SKEIN_CTX_LVALUE(ctx) = NULL;
return (error);
}
/*
* Performs a full skein digest computation in a single call, configuring the
* algorithm according to `mechanism', reading the input to be digested from
* `data' and writing the output to `digest'.
* Supported input/output formats are raw and uio.
*/
/*ARGSUSED*/
static int
skein_digest_atomic(crypto_provider_handle_t provider,
crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
crypto_data_t *data, crypto_data_t *digest, crypto_req_handle_t req)
{
int error;
skein_ctx_t skein_ctx;
crypto_ctx_t ctx;
SKEIN_CTX_LVALUE(&ctx) = &skein_ctx;
/* Init */
if (!VALID_SKEIN_DIGEST_MECH(mechanism->cm_type))
return (CRYPTO_MECHANISM_INVALID);
skein_ctx.sc_mech_type = mechanism->cm_type;
error = skein_get_digest_bitlen(mechanism, &skein_ctx.sc_digest_bitlen);
if (error != CRYPTO_SUCCESS)
goto out;
SKEIN_OP(&skein_ctx, Init, skein_ctx.sc_digest_bitlen);
if ((error = skein_update(&ctx, data, req)) != CRYPTO_SUCCESS)
goto out;
if ((error = skein_final(&ctx, digest, req)) != CRYPTO_SUCCESS)
goto out;
out:
if (error == CRYPTO_SUCCESS)
digest->cd_length =
CRYPTO_BITS2BYTES(skein_ctx.sc_digest_bitlen);
else
digest->cd_length = 0;
bzero(&skein_ctx, sizeof (skein_ctx));
return (error);
}
/*
* Helper function that builds a Skein MAC context from the provided
* mechanism and key.
*/
static int
skein_mac_ctx_build(skein_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key)
{
int error;
if (!VALID_SKEIN_MAC_MECH(mechanism->cm_type))
return (CRYPTO_MECHANISM_INVALID);
if (key->ck_format != CRYPTO_KEY_RAW)
return (CRYPTO_ARGUMENTS_BAD);
ctx->sc_mech_type = mechanism->cm_type;
error = skein_get_digest_bitlen(mechanism, &ctx->sc_digest_bitlen);
if (error != CRYPTO_SUCCESS)
return (error);
SKEIN_OP(ctx, InitExt, ctx->sc_digest_bitlen, 0, key->ck_data,
CRYPTO_BITS2BYTES(key->ck_length));
return (CRYPTO_SUCCESS);
}
/*
* KCF software provider mac entry points.
*/
/*
* Initializes a skein MAC context. You may pass a ctx_template, in which
* case the template will be reused to make initialization more efficient.
* Otherwise a new context will be constructed. The mechanism cm_type must
* be one of SKEIN_*_MAC_MECH_INFO_TYPE. Same as in skein_digest_init, you
* may pass a skein_param_t in cm_param to configure the length of the
* digest. The key must be in raw format.
*/
static int
skein_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
crypto_req_handle_t req)
{
int error;
SKEIN_CTX_LVALUE(ctx) = kmem_alloc(sizeof (*SKEIN_CTX(ctx)),
crypto_kmflag(req));
if (SKEIN_CTX(ctx) == NULL)
return (CRYPTO_HOST_MEMORY);
if (ctx_template != NULL) {
bcopy(ctx_template, SKEIN_CTX(ctx),
sizeof (*SKEIN_CTX(ctx)));
} else {
error = skein_mac_ctx_build(SKEIN_CTX(ctx), mechanism, key);
if (error != CRYPTO_SUCCESS)
goto errout;
}
return (CRYPTO_SUCCESS);
errout:
bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
return (error);
}
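/*
 * Illustrative sketch of the ctx_template pattern used by skein_mac_init()
 * above: build an expensive-to-initialize keyed state once, then memcpy()
 * it into each per-operation context instead of re-keying every time.
 * toy_state_t, build_template() and use_once() are hypothetical stand-ins;
 * the real code copies a skein_ctx_t prepared by skein_mac_ctx_build().
 */
#include <stdio.h>
#include <string.h>

typedef struct toy_state { unsigned long s[4]; } toy_state_t;

static void
build_template(toy_state_t *t, const unsigned char *key, size_t keylen)
{
	size_t i;

	memset(t, 0, sizeof (*t));
	for (i = 0; i < keylen; i++)	/* pretend this keying is expensive */
		t->s[i % 4] = t->s[i % 4] * 31 + key[i];
}

static unsigned long
use_once(const toy_state_t *tmpl, const char *msg)
{
	toy_state_t ctx;

	memcpy(&ctx, tmpl, sizeof (ctx));	/* cheap per-message init */
	while (*msg != '\0')
		ctx.s[0] = ctx.s[0] * 33 + (unsigned char)*msg++;
	return (ctx.s[0]);
}

int
main(void)
{
	toy_state_t tmpl;

	build_template(&tmpl, (const unsigned char *)"secret", 6);
	printf("%lx %lx\n", use_once(&tmpl, "msg one"),
	    use_once(&tmpl, "msg two"));
	return (0);
}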
/*
* The MAC update and final calls are reused from the regular digest code.
*/
/*ARGSUSED*/
/*
* Same as skein_digest_atomic, performs an atomic Skein MAC operation in
* one step. All the same properties apply to the arguments of this
* function as to those of the partial operations above.
*/
static int
skein_mac_atomic(crypto_provider_handle_t provider,
crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
/* faux crypto context just for skein_digest_{update,final} */
int error;
crypto_ctx_t ctx;
skein_ctx_t skein_ctx;
SKEIN_CTX_LVALUE(&ctx) = &skein_ctx;
if (ctx_template != NULL) {
bcopy(ctx_template, &skein_ctx, sizeof (skein_ctx));
} else {
error = skein_mac_ctx_build(&skein_ctx, mechanism, key);
if (error != CRYPTO_SUCCESS)
goto errout;
}
if ((error = skein_update(&ctx, data, req)) != CRYPTO_SUCCESS)
goto errout;
if ((error = skein_final(&ctx, mac, req)) != CRYPTO_SUCCESS)
goto errout;
return (CRYPTO_SUCCESS);
errout:
bzero(&skein_ctx, sizeof (skein_ctx));
return (error);
}
/*
* KCF software provider context management entry points.
*/
/*
* Constructs a context template for the Skein MAC algorithm. The same
* properties apply to the arguments of this function as to those of
* skein_mac_init.
*/
/*ARGSUSED*/
static int
skein_create_ctx_template(crypto_provider_handle_t provider,
crypto_mechanism_t *mechanism, crypto_key_t *key,
crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
crypto_req_handle_t req)
{
int error;
skein_ctx_t *ctx_tmpl;
ctx_tmpl = kmem_alloc(sizeof (*ctx_tmpl), crypto_kmflag(req));
if (ctx_tmpl == NULL)
return (CRYPTO_HOST_MEMORY);
error = skein_mac_ctx_build(ctx_tmpl, mechanism, key);
if (error != CRYPTO_SUCCESS)
goto errout;
*ctx_template = ctx_tmpl;
*ctx_template_size = sizeof (*ctx_tmpl);
return (CRYPTO_SUCCESS);
errout:
bzero(ctx_tmpl, sizeof (*ctx_tmpl));
kmem_free(ctx_tmpl, sizeof (*ctx_tmpl));
return (error);
}
/*
* Frees a skein context in a parent crypto context.
*/
static int
skein_free_context(crypto_ctx_t *ctx)
{
if (SKEIN_CTX(ctx) != NULL) {
bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
}
return (CRYPTO_SUCCESS);
}
diff --git a/sys/contrib/openzfs/module/lua/llex.c b/sys/contrib/openzfs/module/lua/llex.c
index 50c301f599f1..0c3488a551f6 100644
--- a/sys/contrib/openzfs/module/lua/llex.c
+++ b/sys/contrib/openzfs/module/lua/llex.c
@@ -1,531 +1,531 @@
/* BEGIN CSTYLED */
/*
** $Id: llex.c,v 2.63.1.3 2015/02/09 17:56:34 roberto Exp $
** Lexical Analyzer
** See Copyright Notice in lua.h
*/
#define llex_c
#define LUA_CORE
#include <sys/lua/lua.h>
#include "lctype.h"
#include "ldo.h"
#include "llex.h"
#include "lobject.h"
#include "lparser.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "lzio.h"
#define next(ls) (ls->current = zgetc(ls->z))
#define currIsNewline(ls) (ls->current == '\n' || ls->current == '\r')
/* ORDER RESERVED */
static const char *const luaX_tokens [] = {
"and", "break", "do", "else", "elseif",
"end", "false", "for", "function", "goto", "if",
"in", "local", "nil", "not", "or", "repeat",
"return", "then", "true", "until", "while",
"..", "...", "==", ">=", "<=", "~=", "::", "<eof>",
"<number>", "<name>", "<string>"
};
#define save_and_next(ls) (save(ls, ls->current), next(ls))
static l_noret lexerror (LexState *ls, const char *msg, int token);
static void save (LexState *ls, int c) {
Mbuffer *b = ls->buff;
if (luaZ_bufflen(b) + 1 > luaZ_sizebuffer(b)) {
size_t newsize;
if (luaZ_sizebuffer(b) >= MAX_SIZET/2)
lexerror(ls, "lexical element too long", 0);
newsize = luaZ_sizebuffer(b) * 2;
luaZ_resizebuffer(ls->L, b, newsize);
}
b->buffer[luaZ_bufflen(b)++] = cast(char, c);
}
void luaX_init (lua_State *L) {
int i;
for (i=0; i<NUM_RESERVED; i++) {
TString *ts = luaS_new(L, luaX_tokens[i]);
luaS_fix(ts); /* reserved words are never collected */
ts->tsv.extra = cast_byte(i+1); /* reserved word */
}
}
const char *luaX_token2str (LexState *ls, int token) {
if (token < FIRST_RESERVED) { /* single-byte symbols? */
lua_assert(token == cast(unsigned char, token));
return (lisprint(token)) ? luaO_pushfstring(ls->L, LUA_QL("%c"), token) :
luaO_pushfstring(ls->L, "char(%d)", token);
}
else {
const char *s = luaX_tokens[token - FIRST_RESERVED];
if (token < TK_EOS) /* fixed format (symbols and reserved words)? */
return luaO_pushfstring(ls->L, LUA_QS, s);
else /* names, strings, and numerals */
return s;
}
}
static const char *txtToken (LexState *ls, int token) {
switch (token) {
case TK_NAME:
case TK_STRING:
case TK_NUMBER:
save(ls, '\0');
return luaO_pushfstring(ls->L, LUA_QS, luaZ_buffer(ls->buff));
default:
return luaX_token2str(ls, token);
}
}
static l_noret lexerror (LexState *ls, const char *msg, int token) {
char buff[LUA_IDSIZE];
luaO_chunkid(buff, getstr(ls->source), LUA_IDSIZE);
msg = luaO_pushfstring(ls->L, "%s:%d: %s", buff, ls->linenumber, msg);
if (token)
luaO_pushfstring(ls->L, "%s near %s", msg, txtToken(ls, token));
luaD_throw(ls->L, LUA_ERRSYNTAX);
}
l_noret luaX_syntaxerror (LexState *ls, const char *msg) {
lexerror(ls, msg, ls->t.token);
}
/*
** creates a new string and anchors it in function's table so that
** it will not be collected until the end of the function's compilation
** (by that time it should be anchored in function's prototype)
*/
TString *luaX_newstring (LexState *ls, const char *str, size_t l) {
lua_State *L = ls->L;
TValue *o; /* entry for `str' */
TString *ts = luaS_newlstr(L, str, l); /* create new string */
setsvalue2s(L, L->top++, ts); /* temporarily anchor it in stack */
o = luaH_set(L, ls->fs->h, L->top - 1);
if (ttisnil(o)) { /* not in use yet? (see 'addK') */
/* boolean value does not need GC barrier;
table has no metatable, so it does not need to invalidate cache */
setbvalue(o, 1); /* t[string] = true */
luaC_checkGC(L);
}
else { /* string already present */
ts = rawtsvalue(keyfromval(o)); /* re-use value previously stored */
}
L->top--; /* remove string from stack */
return ts;
}
/*
** increment line number and skips newline sequence (any of
** \n, \r, \n\r, or \r\n)
*/
static void inclinenumber (LexState *ls) {
int old = ls->current;
lua_assert(currIsNewline(ls));
next(ls); /* skip `\n' or `\r' */
if (currIsNewline(ls) && ls->current != old)
next(ls); /* skip `\n\r' or `\r\n' */
if (++ls->linenumber >= MAX_INT)
lexerror(ls, "chunk has too many lines", 0);
}
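/*
** Standalone sketch (not part of llex.c) of the newline handling in
** inclinenumber() above: any of \n, \r, \n\r or \r\n counts as a single
** line break.  count_lines() is a hypothetical helper that works on a whole
** buffer rather than the lexer's ZIO stream.
*/
#include <stdio.h>

static int
count_lines(const char *s)
{
	int lines = 1;

	while (*s != '\0') {
		if (*s == '\n' || *s == '\r') {
			char old = *s++;
			/* swallow the second half of a 2-char sequence */
			if ((*s == '\n' || *s == '\r') && *s != old)
				s++;
			lines++;
		} else {
			s++;
		}
	}
	return (lines);
}

int
main(void)
{
	printf("%d\n", count_lines("a\r\nb\nc\rd"));	/* prints 4 */
	return (0);
}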
void luaX_setinput (lua_State *L, LexState *ls, ZIO *z, TString *source,
int firstchar) {
ls->decpoint = '.';
ls->L = L;
ls->current = firstchar;
ls->lookahead.token = TK_EOS; /* no look-ahead token */
ls->z = z;
ls->fs = NULL;
ls->linenumber = 1;
ls->lastline = 1;
ls->source = source;
ls->envn = luaS_new(L, LUA_ENV); /* create env name */
luaS_fix(ls->envn); /* never collect this name */
luaZ_resizebuffer(ls->L, ls->buff, LUA_MINBUFFER); /* initialize buffer */
}
/*
** =======================================================
** LEXICAL ANALYZER
** =======================================================
*/
static int check_next (LexState *ls, const char *set) {
if (ls->current == '\0' || !strchr(set, ls->current))
return 0;
save_and_next(ls);
return 1;
}
/*
** change all characters 'from' in buffer to 'to'
*/
static void buffreplace (LexState *ls, char from, char to) {
size_t n = luaZ_bufflen(ls->buff);
char *p = luaZ_buffer(ls->buff);
while (n--)
if (p[n] == from) p[n] = to;
}
#if !defined(getlocaledecpoint)
#define getlocaledecpoint() (localeconv()->decimal_point[0])
#endif
#define buff2d(b,e) luaO_str2d(luaZ_buffer(b), luaZ_bufflen(b) - 1, e)
/*
** in case of format error, try to change decimal point separator to
** the one defined in the current locale and check again
*/
static void trydecpoint (LexState *ls, SemInfo *seminfo) {
char old = ls->decpoint;
ls->decpoint = getlocaledecpoint();
buffreplace(ls, old, ls->decpoint); /* try new decimal separator */
if (!buff2d(ls->buff, &seminfo->r)) {
/* format error with correct decimal point: no more options */
buffreplace(ls, ls->decpoint, '.'); /* undo change (for error message) */
lexerror(ls, "malformed number", TK_NUMBER);
}
}
/* LUA_NUMBER */
/*
** this function is quite liberal in what it accepts, as 'luaO_str2d'
** will reject ill-formed numerals.
*/
static void read_numeral (LexState *ls, SemInfo *seminfo) {
const char *expo = "Ee";
int first = ls->current;
lua_assert(lisdigit(ls->current));
save_and_next(ls);
if (first == '0' && check_next(ls, "Xx")) /* hexadecimal? */
expo = "Pp";
for (;;) {
if (check_next(ls, expo)) /* exponent part? */
(void) check_next(ls, "+-"); /* optional exponent sign */
if (lisxdigit(ls->current) || ls->current == '.')
save_and_next(ls);
else break;
}
save(ls, '\0');
buffreplace(ls, '.', ls->decpoint); /* follow locale for decimal point */
if (!buff2d(ls->buff, &seminfo->r)) /* format error? */
trydecpoint(ls, seminfo); /* try to update decimal point separator */
}
/*
** skip a sequence '[=*[' or ']=*]' and return its number of '='s or
** -1 if sequence is malformed
*/
static int skip_sep (LexState *ls) {
int count = 0;
int s = ls->current;
lua_assert(s == '[' || s == ']');
save_and_next(ls);
while (ls->current == '=') {
save_and_next(ls);
count++;
}
return (ls->current == s) ? count : (-count) - 1;
}
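/*
** Standalone sketch of the long-bracket level count that skip_sep()
** implements: "[==[" opens a level-2 long string, "[[" a level-0 one, and
** anything else is rejected.  bracket_level() is a hypothetical name and,
** unlike the real lexer, it does not save characters into a token buffer or
** handle the closing "]=*]" form.
*/
#include <stdio.h>

/* Return the number of '='s in a "[=*[" prefix, or -1 if `s' is not one. */
static int
bracket_level(const char *s)
{
	int count = 0;

	if (*s++ != '[')
		return (-1);
	while (*s == '=') {
		s++;
		count++;
	}
	return (*s == '[' ? count : -1);
}

int
main(void)
{
	printf("%d %d %d\n", bracket_level("[==[long]==]"),
	    bracket_level("[["), bracket_level("[=x"));	/* 2 0 -1 */
	return (0);
}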
static void read_long_string (LexState *ls, SemInfo *seminfo, int sep) {
save_and_next(ls); /* skip 2nd `[' */
if (currIsNewline(ls)) /* string starts with a newline? */
inclinenumber(ls); /* skip it */
for (;;) {
switch (ls->current) {
case EOZ:
lexerror(ls, (seminfo) ? "unfinished long string" :
"unfinished long comment", TK_EOS);
break; /* to avoid warnings */
case ']': {
if (skip_sep(ls) == sep) {
save_and_next(ls); /* skip 2nd `]' */
goto endloop;
}
break;
}
case '\n': case '\r': {
save(ls, '\n');
inclinenumber(ls);
if (!seminfo) luaZ_resetbuffer(ls->buff); /* avoid wasting space */
break;
}
default: {
if (seminfo) save_and_next(ls);
else next(ls);
}
}
} endloop:
if (seminfo)
seminfo->ts = luaX_newstring(ls, luaZ_buffer(ls->buff) + (2 + sep),
luaZ_bufflen(ls->buff) - 2*(2 + sep));
}
static void escerror (LexState *ls, int *c, int n, const char *msg) {
int i;
luaZ_resetbuffer(ls->buff); /* prepare error message */
save(ls, '\\');
for (i = 0; i < n && c[i] != EOZ; i++)
save(ls, c[i]);
lexerror(ls, msg, TK_STRING);
}
static int readhexaesc (LexState *ls) {
int c[3], i; /* keep input for error message */
int r = 0; /* result accumulator */
c[0] = 'x'; /* for error message */
for (i = 1; i < 3; i++) { /* read two hexadecimal digits */
c[i] = next(ls);
if (!lisxdigit(c[i]))
escerror(ls, c, i + 1, "hexadecimal digit expected");
r = (r << 4) + luaO_hexavalue(c[i]);
}
return r;
}
static int readdecesc (LexState *ls) {
int c[3], i;
int r = 0; /* result accumulator */
for (i = 0; i < 3 && lisdigit(ls->current); i++) { /* read up to 3 digits */
c[i] = ls->current;
r = 10*r + c[i] - '0';
next(ls);
}
if (r > UCHAR_MAX)
escerror(ls, c, i, "decimal escape too large");
return r;
}
static void read_string (LexState *ls, int del, SemInfo *seminfo) {
save_and_next(ls); /* keep delimiter (for error messages) */
while (ls->current != del) {
switch (ls->current) {
case EOZ:
lexerror(ls, "unfinished string", TK_EOS);
break; /* to avoid warnings */
case '\n':
case '\r':
lexerror(ls, "unfinished string", TK_STRING);
break; /* to avoid warnings */
case '\\': { /* escape sequences */
int c; /* final character to be saved */
next(ls); /* do not save the `\' */
switch (ls->current) {
case 'a': c = '\a'; goto read_save;
case 'b': c = '\b'; goto read_save;
case 'f': c = '\f'; goto read_save;
case 'n': c = '\n'; goto read_save;
case 'r': c = '\r'; goto read_save;
case 't': c = '\t'; goto read_save;
case 'v': c = '\v'; goto read_save;
case 'x': c = readhexaesc(ls); goto read_save;
case '\n': case '\r':
inclinenumber(ls); c = '\n'; goto only_save;
case '\\': case '\"': case '\'':
c = ls->current; goto read_save;
case EOZ: goto no_save; /* will raise an error next loop */
case 'z': { /* zap following span of spaces */
next(ls); /* skip the 'z' */
while (lisspace(ls->current)) {
if (currIsNewline(ls)) inclinenumber(ls);
else next(ls);
}
goto no_save;
}
default: {
if (!lisdigit(ls->current))
escerror(ls, &ls->current, 1, "invalid escape sequence");
/* digital escape \ddd */
c = readdecesc(ls);
goto only_save;
}
}
read_save: next(ls); /* read next character */
only_save: save(ls, c); /* save 'c' */
no_save: break;
}
default:
save_and_next(ls);
}
}
save_and_next(ls); /* skip delimiter */
seminfo->ts = luaX_newstring(ls, luaZ_buffer(ls->buff) + 1,
luaZ_bufflen(ls->buff) - 2);
}
static int llex (LexState *ls, SemInfo *seminfo) {
luaZ_resetbuffer(ls->buff);
for (;;) {
switch (ls->current) {
case '\n': case '\r': { /* line breaks */
inclinenumber(ls);
break;
}
case ' ': case '\f': case '\t': case '\v': { /* spaces */
next(ls);
break;
}
case '-': { /* '-' or '--' (comment) */
next(ls);
if (ls->current != '-') return '-';
/* else is a comment */
next(ls);
if (ls->current == '[') { /* long comment? */
int sep = skip_sep(ls);
luaZ_resetbuffer(ls->buff); /* `skip_sep' may dirty the buffer */
if (sep >= 0) {
read_long_string(ls, NULL, sep); /* skip long comment */
luaZ_resetbuffer(ls->buff); /* previous call may dirty the buff. */
break;
}
}
/* else short comment */
while (!currIsNewline(ls) && ls->current != EOZ)
next(ls); /* skip until end of line (or end of file) */
break;
}
case '[': { /* long string or simply '[' */
int sep = skip_sep(ls);
if (sep >= 0) {
read_long_string(ls, seminfo, sep);
return TK_STRING;
} else if (sep == -1) {
return '[';
} else {
lexerror(ls, "invalid long string delimiter", TK_STRING);
break;
}
}
case '=': {
next(ls);
if (ls->current != '=') return '=';
else { next(ls); return TK_EQ; }
}
case '<': {
next(ls);
if (ls->current != '=') return '<';
else { next(ls); return TK_LE; }
}
case '>': {
next(ls);
if (ls->current != '=') return '>';
else { next(ls); return TK_GE; }
}
case '~': {
next(ls);
if (ls->current != '=') return '~';
else { next(ls); return TK_NE; }
}
case ':': {
next(ls);
if (ls->current != ':') return ':';
else { next(ls); return TK_DBCOLON; }
}
case '"': case '\'': { /* short literal strings */
read_string(ls, ls->current, seminfo);
return TK_STRING;
}
case '.': { /* '.', '..', '...', or number */
save_and_next(ls);
if (check_next(ls, ".")) {
if (check_next(ls, "."))
return TK_DOTS; /* '...' */
else return TK_CONCAT; /* '..' */
}
else if (!lisdigit(ls->current)) return '.';
/* else go through */
}
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
read_numeral(ls, seminfo);
return TK_NUMBER;
}
case EOZ: {
return TK_EOS;
}
default: {
if (lislalpha(ls->current)) { /* identifier or reserved word? */
TString *ts;
do {
save_and_next(ls);
} while (lislalnum(ls->current));
ts = luaX_newstring(ls, luaZ_buffer(ls->buff),
luaZ_bufflen(ls->buff));
seminfo->ts = ts;
if (isreserved(ts)) /* reserved word? */
return ts->tsv.extra - 1 + FIRST_RESERVED;
else {
return TK_NAME;
}
}
else { /* single-char tokens (+ - / ...) */
int c = ls->current;
next(ls);
return c;
}
}
}
}
}
void luaX_next (LexState *ls) {
ls->lastline = ls->linenumber;
if (ls->lookahead.token != TK_EOS) { /* is there a look-ahead token? */
ls->t = ls->lookahead; /* use this one */
ls->lookahead.token = TK_EOS; /* and discharge it */
}
else
ls->t.token = llex(ls, &ls->t.seminfo); /* read next token */
}
int luaX_lookahead (LexState *ls) {
lua_assert(ls->lookahead.token == TK_EOS);
ls->lookahead.token = llex(ls, &ls->lookahead.seminfo);
return ls->lookahead.token;
}
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/lua/ltable.c b/sys/contrib/openzfs/module/lua/ltable.c
index f60418721bef..0ba462cfd885 100644
--- a/sys/contrib/openzfs/module/lua/ltable.c
+++ b/sys/contrib/openzfs/module/lua/ltable.c
@@ -1,592 +1,592 @@
/* BEGIN CSTYLED */
/*
** $Id: ltable.c,v 2.72.1.1 2013/04/12 18:48:47 roberto Exp $
** Lua tables (hash)
** See Copyright Notice in lua.h
*/
/*
** Implementation of tables (aka arrays, objects, or hash tables).
** Tables keep their elements in two parts: an array part and a hash part.
** Non-negative integer keys are all candidates to be kept in the array
** part. The actual size of the array is the largest `n' such that at
** least half the slots between 0 and n are in use.
** Hash uses a mix of chained scatter table with Brent's variation.
** A main invariant of these tables is that, if an element is not
** in its main position (i.e. the `original' position that its hash gives
** to it), then the colliding element is in its own main position.
** Hence even when the load factor reaches 100%, performance remains good.
*/
#define ltable_c
#define LUA_CORE
#include <sys/lua/lua.h>
#include "ldebug.h"
#include "ldo.h"
#include "lgc.h"
#include "lmem.h"
#include "lobject.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "lvm.h"
/*
** max size of array part is 2^MAXBITS
*/
#if LUAI_BITSINT >= 32
#define MAXBITS 30
#else
#define MAXBITS (LUAI_BITSINT-2)
#endif
#define MAXASIZE (1 << MAXBITS)
#define hashpow2(t,n) (gnode(t, lmod((n), sizenode(t))))
#define hashstr(t,str) hashpow2(t, (str)->tsv.hash)
#define hashboolean(t,p) hashpow2(t, p)
/*
** for some types, it is better to avoid modulus by power of 2, as
** they tend to have many 2 factors.
*/
#define hashmod(t,n) (gnode(t, ((n) % ((sizenode(t)-1)|1))))
#define hashpointer(t,p) hashmod(t, IntPoint(p))
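/*
** Illustrative sketch (standalone, not part of ltable.c) of why hashmod()
** reduces modulo ((sizenode(t)-1)|1) instead of masking with a power of two:
** pointers and boxed numbers tend to have many factors of 2, so an odd
** modulus spreads them better, and the "|1" keeps the divisor nonzero when
** the node array has a single slot.  The values below are assumptions for
** demonstration only.
*/
#include <stdio.h>

int
main(void)
{
	unsigned int size = 8;		/* pretend sizenode(t) == 8 */
	unsigned int p;

	for (p = 0; p < 64; p += 16)	/* 16-byte aligned "pointers" */
		printf("mask: %u  odd-mod: %u\n",
		    p & (size - 1), p % ((size - 1) | 1));
	/* the mask collapses every key to 0; the odd modulus spreads them */
	return (0);
}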
#define dummynode (&dummynode_)
#define isdummy(n) ((n) == dummynode)
static const Node dummynode_ = {
{NILCONSTANT}, /* value */
{{NILCONSTANT, NULL}} /* key */
};
/*
** hash for lua_Numbers
*/
static Node *hashnum (const Table *t, lua_Number n) {
int i;
luai_hashnum(i, n);
if (i < 0) {
if (cast(unsigned int, i) == 0u - i) /* use unsigned to avoid overflows */
i = 0; /* handle INT_MIN */
i = -i; /* must be a positive value */
}
return hashmod(t, i);
}
/*
** returns the `main' position of an element in a table (that is, the index
** of its hash value)
*/
static Node *mainposition (const Table *t, const TValue *key) {
switch (ttype(key)) {
case LUA_TNUMBER:
return hashnum(t, nvalue(key));
case LUA_TLNGSTR: {
TString *s = rawtsvalue(key);
if (s->tsv.extra == 0) { /* no hash? */
s->tsv.hash = luaS_hash(getstr(s), s->tsv.len, s->tsv.hash);
s->tsv.extra = 1; /* now it has its hash */
}
return hashstr(t, rawtsvalue(key));
}
case LUA_TSHRSTR:
return hashstr(t, rawtsvalue(key));
case LUA_TBOOLEAN:
return hashboolean(t, bvalue(key));
case LUA_TLIGHTUSERDATA:
return hashpointer(t, pvalue(key));
case LUA_TLCF:
return hashpointer(t, fvalue(key));
default:
return hashpointer(t, gcvalue(key));
}
}
/*
** returns the index for `key' if `key' is an appropriate key to live in
** the array part of the table, -1 otherwise.
*/
static int arrayindex (const TValue *key) {
if (ttisnumber(key)) {
lua_Number n = nvalue(key);
int k;
lua_number2int(k, n);
if (luai_numeq(cast_num(k), n))
return k;
}
return -1; /* `key' did not match some condition */
}
/*
** returns the index of a `key' for table traversals. All elements in
** the array part go first, then the elements in the hash part. The
** beginning of a traversal is signaled by -1.
*/
static int findindex (lua_State *L, Table *t, StkId key) {
int i;
if (ttisnil(key)) return -1; /* first iteration */
i = arrayindex(key);
if (0 < i && i <= t->sizearray) /* is `key' inside array part? */
return i-1; /* yes; that's the index (corrected to C) */
else {
Node *n = mainposition(t, key);
for (;;) { /* check whether `key' is somewhere in the chain */
/* key may be dead already, but it is ok to use it in `next' */
if (luaV_rawequalobj(gkey(n), key) ||
(ttisdeadkey(gkey(n)) && iscollectable(key) &&
deadvalue(gkey(n)) == gcvalue(key))) {
i = cast_int(n - gnode(t, 0)); /* key index in hash table */
/* hash elements are numbered after array ones */
return i + t->sizearray;
}
else n = gnext(n);
if (n == NULL)
luaG_runerror(L, "invalid key to " LUA_QL("next")); /* key not found */
}
}
}
int luaH_next (lua_State *L, Table *t, StkId key) {
int i = findindex(L, t, key); /* find original element */
for (i++; i < t->sizearray; i++) { /* try first array part */
if (!ttisnil(&t->array[i])) { /* a non-nil value? */
setnvalue(key, cast_num(i+1));
setobj2s(L, key+1, &t->array[i]);
return 1;
}
}
for (i -= t->sizearray; i < sizenode(t); i++) { /* then hash part */
if (!ttisnil(gval(gnode(t, i)))) { /* a non-nil value? */
setobj2s(L, key, gkey(gnode(t, i)));
setobj2s(L, key+1, gval(gnode(t, i)));
return 1;
}
}
return 0; /* no more elements */
}
/*
** {=============================================================
** Rehash
** ==============================================================
*/
static int computesizes (int nums[], int *narray) {
int i;
int twotoi; /* 2^i */
int a = 0; /* number of elements smaller than 2^i */
int na = 0; /* number of elements to go to array part */
int n = 0; /* optimal size for array part */
for (i = 0, twotoi = 1; twotoi/2 < *narray; i++, twotoi *= 2) {
if (nums[i] > 0) {
a += nums[i];
if (a > twotoi/2) { /* more than half elements present? */
n = twotoi; /* optimal size (till now) */
na = a; /* all elements smaller than n will go to array part */
}
}
if (a == *narray) break; /* all elements already counted */
}
*narray = n;
lua_assert(*narray/2 <= na && na <= *narray);
return na;
}
static int countint (const TValue *key, int *nums) {
int k = arrayindex(key);
if (0 < k && k <= MAXASIZE) { /* is `key' an appropriate array index? */
nums[luaO_ceillog2(k)]++; /* count as such */
return 1;
}
else
return 0;
}
static int numusearray (const Table *t, int *nums) {
int lg;
int ttlg; /* 2^lg */
int ause = 0; /* summation of `nums' */
int i = 1; /* count to traverse all array keys */
for (lg=0, ttlg=1; lg<=MAXBITS; lg++, ttlg*=2) { /* for each slice */
int lc = 0; /* counter */
int lim = ttlg;
if (lim > t->sizearray) {
lim = t->sizearray; /* adjust upper limit */
if (i > lim)
break; /* no more elements to count */
}
/* count elements in range (2^(lg-1), 2^lg] */
for (; i <= lim; i++) {
if (!ttisnil(&t->array[i-1]))
lc++;
}
nums[lg] += lc;
ause += lc;
}
return ause;
}
static int numusehash (const Table *t, int *nums, int *pnasize) {
int totaluse = 0; /* total number of elements */
int ause = 0; /* summation of `nums' */
int i = sizenode(t);
while (i--) {
Node *n = &t->node[i];
if (!ttisnil(gval(n))) {
ause += countint(gkey(n), nums);
totaluse++;
}
}
*pnasize += ause;
return totaluse;
}
static void setarrayvector (lua_State *L, Table *t, int size) {
int i;
luaM_reallocvector(L, t->array, t->sizearray, size, TValue);
for (i=t->sizearray; i<size; i++)
setnilvalue(&t->array[i]);
t->sizearray = size;
}
static void setnodevector (lua_State *L, Table *t, int size) {
int lsize;
if (size == 0) { /* no elements to hash part? */
t->node = cast(Node *, dummynode); /* use common `dummynode' */
lsize = 0;
}
else {
int i;
lsize = luaO_ceillog2(size);
if (lsize > MAXBITS)
luaG_runerror(L, "table overflow");
size = twoto(lsize);
t->node = luaM_newvector(L, size, Node);
for (i=0; i<size; i++) {
Node *n = gnode(t, i);
gnext(n) = NULL;
setnilvalue(gkey(n));
setnilvalue(gval(n));
}
}
t->lsizenode = cast_byte(lsize);
t->lastfree = gnode(t, size); /* all positions are free */
}
void luaH_resize (lua_State *L, Table *t, int nasize, int nhsize) {
int i;
int oldasize = t->sizearray;
int oldhsize = t->lsizenode;
Node *nold = t->node; /* save old hash ... */
if (nasize > oldasize) /* array part must grow? */
setarrayvector(L, t, nasize);
/* create new hash part with appropriate size */
setnodevector(L, t, nhsize);
if (nasize < oldasize) { /* array part must shrink? */
t->sizearray = nasize;
/* re-insert elements from vanishing slice */
for (i=nasize; i<oldasize; i++) {
if (!ttisnil(&t->array[i]))
luaH_setint(L, t, i + 1, &t->array[i]);
}
/* shrink array */
luaM_reallocvector(L, t->array, oldasize, nasize, TValue);
}
/* re-insert elements from hash part */
for (i = twoto(oldhsize) - 1; i >= 0; i--) {
Node *old = nold+i;
if (!ttisnil(gval(old))) {
/* doesn't need barrier/invalidate cache, as entry was
already present in the table */
setobjt2t(L, luaH_set(L, t, gkey(old)), gval(old));
}
}
if (!isdummy(nold))
luaM_freearray(L, nold, cast(size_t, twoto(oldhsize))); /* free old array */
}
void luaH_resizearray (lua_State *L, Table *t, int nasize) {
int nsize = isdummy(t->node) ? 0 : sizenode(t);
luaH_resize(L, t, nasize, nsize);
}
static void rehash (lua_State *L, Table *t, const TValue *ek) {
int nasize, na;
int nums[MAXBITS+1]; /* nums[i] = number of keys with 2^(i-1) < k <= 2^i */
int i;
int totaluse;
for (i=0; i<=MAXBITS; i++) nums[i] = 0; /* reset counts */
nasize = numusearray(t, nums); /* count keys in array part */
totaluse = nasize; /* all those keys are integer keys */
totaluse += numusehash(t, nums, &nasize); /* count keys in hash part */
/* count extra key */
nasize += countint(ek, nums);
totaluse++;
/* compute new size for array part */
na = computesizes(nums, &nasize);
/* resize the table to new computed sizes */
luaH_resize(L, t, nasize, totaluse - na);
}
/*
** }=============================================================
*/
Table *luaH_new (lua_State *L) {
Table *t = &luaC_newobj(L, LUA_TTABLE, sizeof(Table), NULL, 0)->h;
t->metatable = NULL;
t->flags = cast_byte(~0);
t->array = NULL;
t->sizearray = 0;
setnodevector(L, t, 0);
return t;
}
void luaH_free (lua_State *L, Table *t) {
if (!isdummy(t->node))
luaM_freearray(L, t->node, cast(size_t, sizenode(t)));
luaM_freearray(L, t->array, t->sizearray);
luaM_free(L, t);
}
static Node *getfreepos (Table *t) {
while (t->lastfree > t->node) {
t->lastfree--;
if (ttisnil(gkey(t->lastfree)))
return t->lastfree;
}
return NULL; /* could not find a free place */
}
/*
** inserts a new key into a hash table; first, check whether key's main
** position is free. If not, check whether colliding node is in its main
** position or not: if it is not, move colliding node to an empty place and
** put new key in its main position; otherwise (colliding node is in its main
** position), new key goes to an empty position.
*/
TValue *luaH_newkey (lua_State *L, Table *t, const TValue *key) {
Node *mp;
if (ttisnil(key)) luaG_runerror(L, "table index is nil");
#if defined LUA_HAS_FLOAT_NUMBERS
else if (ttisnumber(key) && luai_numisnan(L, nvalue(key)))
luaG_runerror(L, "table index is NaN");
#endif
mp = mainposition(t, key);
if (!ttisnil(gval(mp)) || isdummy(mp)) { /* main position is taken? */
Node *othern;
Node *n = getfreepos(t); /* get a free place */
if (n == NULL) { /* cannot find a free place? */
rehash(L, t, key); /* grow table */
/* whatever called 'newkey' takes care of TM cache and GC barrier */
return luaH_set(L, t, key); /* insert key into grown table */
}
lua_assert(!isdummy(n));
othern = mainposition(t, gkey(mp));
if (othern != mp) { /* is colliding node out of its main position? */
/* yes; move colliding node into free position */
while (gnext(othern) != mp) othern = gnext(othern); /* find previous */
gnext(othern) = n; /* redo the chain with `n' in place of `mp' */
*n = *mp; /* copy colliding node into free pos. (mp->next also goes) */
gnext(mp) = NULL; /* now `mp' is free */
setnilvalue(gval(mp));
}
else { /* colliding node is in its own main position */
/* new node will go into free position */
gnext(n) = gnext(mp); /* chain new position */
gnext(mp) = n;
mp = n;
}
}
setobj2t(L, gkey(mp), key);
luaC_barrierback(L, obj2gco(t), key);
lua_assert(ttisnil(gval(mp)));
return gval(mp);
}
/*
** search function for integers
*/
const TValue *luaH_getint (Table *t, int key) {
/* (1 <= key && key <= t->sizearray) */
if (cast(unsigned int, key-1) < cast(unsigned int, t->sizearray))
return &t->array[key-1];
else {
lua_Number nk = cast_num(key);
Node *n = hashnum(t, nk);
do { /* check whether `key' is somewhere in the chain */
if (ttisnumber(gkey(n)) && luai_numeq(nvalue(gkey(n)), nk))
return gval(n); /* that's it */
else n = gnext(n);
} while (n);
return luaO_nilobject;
}
}
/*
** search function for short strings
*/
const TValue *luaH_getstr (Table *t, TString *key) {
Node *n = hashstr(t, key);
lua_assert(key->tsv.tt == LUA_TSHRSTR);
do { /* check whether `key' is somewhere in the chain */
if (ttisshrstring(gkey(n)) && eqshrstr(rawtsvalue(gkey(n)), key))
return gval(n); /* that's it */
else n = gnext(n);
} while (n);
return luaO_nilobject;
}
/*
** main search function
*/
const TValue *luaH_get (Table *t, const TValue *key) {
switch (ttype(key)) {
case LUA_TSHRSTR: return luaH_getstr(t, rawtsvalue(key));
case LUA_TNIL: return luaO_nilobject;
case LUA_TNUMBER: {
int k;
lua_Number n = nvalue(key);
lua_number2int(k, n);
if (luai_numeq(cast_num(k), n)) /* index is int? */
return luaH_getint(t, k); /* use specialized version */
/* else go through */
}
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
default: {
Node *n = mainposition(t, key);
do { /* check whether `key' is somewhere in the chain */
if (luaV_rawequalobj(gkey(n), key))
return gval(n); /* that's it */
else n = gnext(n);
} while (n);
return luaO_nilobject;
}
}
}
/*
** beware: when using this function you probably need to check a GC
** barrier and invalidate the TM cache.
*/
TValue *luaH_set (lua_State *L, Table *t, const TValue *key) {
const TValue *p = luaH_get(t, key);
if (p != luaO_nilobject)
return cast(TValue *, p);
else return luaH_newkey(L, t, key);
}
void luaH_setint (lua_State *L, Table *t, int key, TValue *value) {
const TValue *p = luaH_getint(t, key);
TValue *cell;
if (p != luaO_nilobject)
cell = cast(TValue *, p);
else {
TValue k;
setnvalue(&k, cast_num(key));
cell = luaH_newkey(L, t, &k);
}
setobj2t(L, cell, value);
}
static int unbound_search (Table *t, unsigned int j) {
unsigned int i = j; /* i is zero or a present index */
j++;
/* find `i' and `j' such that i is present and j is not */
while (!ttisnil(luaH_getint(t, j))) {
i = j;
j *= 2;
if (j > cast(unsigned int, MAX_INT)) { /* overflow? */
/* table was built with bad purposes: resort to linear search */
i = 1;
while (!ttisnil(luaH_getint(t, i))) i++;
return i - 1;
}
}
/* now do a binary search between them */
while (j - i > 1) {
unsigned int m = (i+j)/2;
if (ttisnil(luaH_getint(t, m))) j = m;
else i = m;
}
return i;
}
/*
** Try to find a boundary in table `t'. A `boundary' is an integer index
** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
*/
int luaH_getn (Table *t) {
unsigned int j = t->sizearray;
if (j > 0 && ttisnil(&t->array[j - 1])) {
/* there is a boundary in the array part: (binary) search for it */
unsigned int i = 0;
while (j - i > 1) {
unsigned int m = (i+j)/2;
if (ttisnil(&t->array[m - 1])) j = m;
else i = m;
}
return i;
}
/* else must find a boundary in hash part */
else if (isdummy(t->node)) /* hash part is empty? */
return j; /* that is easy... */
else return unbound_search(t, j);
}
#if defined(LUA_DEBUG)
Node *luaH_mainposition (const Table *t, const TValue *key) {
return mainposition(t, key);
}
int luaH_isdummy (Node *n) { return isdummy(n); }
#endif
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
index 95a83542fadc..8bc1ef1325e9 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
@@ -1,508 +1,508 @@
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2016 by Delphix. All rights reserved.
*/
/*
* See abd.c for a general overview of the arc buffered data (ABD).
*
* Using a large proportion of scattered ABDs decreases ARC fragmentation since
* when we are at the limit of allocatable space, using equal-size chunks will
* allow us to quickly reclaim enough space for a new large allocation (assuming
* it is also scattered).
*
* ABDs are allocated scattered by default unless the caller uses
* abd_alloc_linear() or zfs_abd_scatter_enabled is disabled.
*/
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
typedef struct abd_stats {
kstat_named_t abdstat_struct_size;
kstat_named_t abdstat_scatter_cnt;
kstat_named_t abdstat_scatter_data_size;
kstat_named_t abdstat_scatter_chunk_waste;
kstat_named_t abdstat_linear_cnt;
kstat_named_t abdstat_linear_data_size;
} abd_stats_t;
static abd_stats_t abd_stats = {
/* Amount of memory occupied by all of the abd_t struct allocations */
{ "struct_size", KSTAT_DATA_UINT64 },
/*
* The number of scatter ABDs which are currently allocated, excluding
* ABDs which don't own their data (for instance the ones which were
* allocated through abd_get_offset()).
*/
{ "scatter_cnt", KSTAT_DATA_UINT64 },
/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
{ "scatter_data_size", KSTAT_DATA_UINT64 },
/*
* The amount of space wasted at the end of the last chunk across all
* scatter ABDs tracked by scatter_cnt.
*/
{ "scatter_chunk_waste", KSTAT_DATA_UINT64 },
/*
* The number of linear ABDs which are currently allocated, excluding
* ABDs which don't own their data (for instance the ones which were
* allocated through abd_get_offset() and abd_get_from_buf()). If an
* ABD takes ownership of its buf then it will become tracked.
*/
{ "linear_cnt", KSTAT_DATA_UINT64 },
/* Amount of data stored in all linear ABDs tracked by linear_cnt */
{ "linear_data_size", KSTAT_DATA_UINT64 },
};
struct {
wmsum_t abdstat_struct_size;
wmsum_t abdstat_scatter_cnt;
wmsum_t abdstat_scatter_data_size;
wmsum_t abdstat_scatter_chunk_waste;
wmsum_t abdstat_linear_cnt;
wmsum_t abdstat_linear_data_size;
} abd_sums;
/*
* zfs_abd_scatter_min_size is the minimum allocation size to use scatter
* ABD's for. Smaller allocations will use linear ABD's which use
* zio_[data_]buf_alloc().
*
* Scatter ABD's use at least one page each, so sub-page allocations waste
* some space when allocated as scatter (e.g. 2KB scatter allocation wastes
* half of each page). Using linear ABD's for small allocations means that
* they will be put on slabs which contain many allocations.
*
* Linear ABDs for multi-page allocations are easier to use, and in some cases
* they avoid buffer copying. But allocating and especially freeing
* multi-page linear ABDs are expensive operations due to KVA mapping and
* unmapping, and over time they cause KVA fragmentation.
*/
size_t zfs_abd_scatter_min_size = PAGE_SIZE + 1;
#if defined(_KERNEL)
SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, abd_scatter_enabled, CTLFLAG_RWTUN,
&zfs_abd_scatter_enabled, 0, "Enable scattered ARC data buffers");
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_scatter_min_size, CTLFLAG_RWTUN,
&zfs_abd_scatter_min_size, 0, "Minimum size of scatter allocations.");
#endif
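/*
 * Standalone sketch of the space trade-off described above: a sub-page
 * scatter allocation still consumes whole pages, so the wasted tail is
 * pages*PAGE_SIZE - size (e.g. a 2 KB scatter allocation wastes half a
 * page).  The 4 KB EX_PAGE_SIZE is an assumption for illustration; the
 * kernel uses the real PAGE_SIZE/PAGE_MASK/PAGE_SHIFT.
 */
#include <stdio.h>

#define	EX_PAGE_SIZE	4096UL

int
main(void)
{
	unsigned long sizes[] = { 512, 2048, 4096, 5000, 131072 };
	unsigned long i;

	for (i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
		unsigned long chunks =
		    (sizes[i] + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;
		printf("size %7lu -> %3lu page(s), %4lu bytes wasted\n",
		    sizes[i], chunks, chunks * EX_PAGE_SIZE - sizes[i]);
	}
	return (0);
}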
kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;
/*
* We use a scattered SPA_MAXBLOCKSIZE sized ABD whose chunks are
* just a single zero'd page-sized buffer. This allows us to conserve
* memory by only using a single zero buffer for the scatter chunks.
*/
abd_t *abd_zero_scatter = NULL;
static char *abd_zero_buf = NULL;
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
return ((size + PAGE_MASK) >> PAGE_SHIFT);
}
static inline uint_t
abd_scatter_chunkcnt(abd_t *abd)
{
ASSERT(!abd_is_linear(abd));
return (abd_chunkcnt_for_bytes(
ABD_SCATTER(abd).abd_offset + abd->abd_size));
}
boolean_t
abd_size_alloc_linear(size_t size)
{
- return (size < zfs_abd_scatter_min_size ? B_TRUE : B_FALSE);
+ return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
uint_t n = abd_scatter_chunkcnt(abd);
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
int waste = (n << PAGE_SHIFT) - abd->abd_size;
if (op == ABDSTAT_INCR) {
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
} else {
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
}
}
void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
if (op == ABDSTAT_INCR) {
ABDSTAT_BUMP(abdstat_linear_cnt);
ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
} else {
ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
}
}
void
abd_verify_scatter(abd_t *abd)
{
uint_t i, n;
/*
* There are no scatter linear pages on FreeBSD, so it is
* an error if the ABD has been marked as a linear page.
*/
ASSERT(!abd_is_linear_page(abd));
ASSERT3U(ABD_SCATTER(abd).abd_offset, <, PAGE_SIZE);
n = abd_scatter_chunkcnt(abd);
for (i = 0; i < n; i++) {
ASSERT3P(ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
}
}
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
uint_t i, n;
n = abd_chunkcnt_for_bytes(size);
for (i = 0; i < n; i++) {
ABD_SCATTER(abd).abd_chunks[i] =
kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
}
}
void
abd_free_chunks(abd_t *abd)
{
uint_t i, n;
n = abd_scatter_chunkcnt(abd);
for (i = 0; i < n; i++) {
kmem_cache_free(abd_chunk_cache,
ABD_SCATTER(abd).abd_chunks[i]);
}
}
abd_t *
abd_alloc_struct_impl(size_t size)
{
uint_t chunkcnt = abd_chunkcnt_for_bytes(size);
/*
* In the event we are allocating a gang ABD, the size passed in
* will be 0. We must make sure to set abd_size to the size of an
* ABD struct as opposed to an ABD scatter with 0 chunks. The gang
* ABD struct allocation accounts for an additional 24 bytes over
* a scatter ABD with 0 chunks.
*/
size_t abd_size = MAX(sizeof (abd_t),
offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
ASSERT3P(abd, !=, NULL);
ABDSTAT_INCR(abdstat_struct_size, abd_size);
return (abd);
}
void
abd_free_struct_impl(abd_t *abd)
{
uint_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
abd_scatter_chunkcnt(abd);
ssize_t size = MAX(sizeof (abd_t),
offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
kmem_free(abd, size);
ABDSTAT_INCR(abdstat_struct_size, -size);
}
/*
* Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where
* each chunk in the scatterlist will be set to abd_zero_buf.
*/
static void
abd_alloc_zero_scatter(void)
{
uint_t i, n;
n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
abd_zero_buf = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER | ABD_FLAG_ZEROS;
abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
for (i = 0; i < n; i++) {
ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
abd_zero_buf;
}
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, PAGE_SIZE);
}
static void
abd_free_zero_scatter(void)
{
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGE_SIZE);
abd_free_struct(abd_zero_scatter);
abd_zero_scatter = NULL;
kmem_cache_free(abd_chunk_cache, abd_zero_buf);
}
static int
abd_kstats_update(kstat_t *ksp, int rw)
{
abd_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
as->abdstat_struct_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_struct_size);
as->abdstat_scatter_cnt.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_cnt);
as->abdstat_scatter_data_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_data_size);
as->abdstat_scatter_chunk_waste.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
as->abdstat_linear_cnt.value.ui64 =
wmsum_value(&abd_sums.abdstat_linear_cnt);
as->abdstat_linear_data_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_linear_data_size);
return (0);
}
void
abd_init(void)
{
abd_chunk_cache = kmem_cache_create("abd_chunk", PAGE_SIZE, 0,
NULL, NULL, NULL, NULL, 0, KMC_NODEBUG);
wmsum_init(&abd_sums.abdstat_struct_size, 0);
wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (abd_ksp != NULL) {
abd_ksp->ks_data = &abd_stats;
abd_ksp->ks_update = abd_kstats_update;
kstat_install(abd_ksp);
}
abd_alloc_zero_scatter();
}
void
abd_fini(void)
{
abd_free_zero_scatter();
if (abd_ksp != NULL) {
kstat_delete(abd_ksp);
abd_ksp = NULL;
}
wmsum_fini(&abd_sums.abdstat_struct_size);
wmsum_fini(&abd_sums.abdstat_scatter_cnt);
wmsum_fini(&abd_sums.abdstat_scatter_data_size);
wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
wmsum_fini(&abd_sums.abdstat_linear_cnt);
wmsum_fini(&abd_sums.abdstat_linear_data_size);
kmem_cache_destroy(abd_chunk_cache);
abd_chunk_cache = NULL;
}
void
abd_free_linear_page(abd_t *abd)
{
/*
* FreeBSD does not have scatter linear pages,
* so reaching this is an error.
*/
VERIFY(0);
}
/*
* If we're going to use this ABD for doing I/O using the block layer, the
* consumer of the ABD data doesn't care if it's scattered or not, and we don't
* plan to store this ABD in memory for a long period of time, we should
* allocate the ABD type that requires the least data copying to do the I/O.
*
* Currently this is linear ABDs, however if ldi_strategy() can ever issue I/Os
* using a scatter/gather list we should switch to that and replace this call
* with vanilla abd_alloc().
*/
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
return (abd_alloc_linear(size, is_metadata));
}
abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
size_t size)
{
abd_verify(sabd);
ASSERT3U(off, <=, sabd->abd_size);
size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
size_t chunkcnt = abd_chunkcnt_for_bytes(
(new_offset & PAGE_MASK) + size);
ASSERT3U(chunkcnt, <=, abd_scatter_chunkcnt(sabd));
/*
* If an abd struct is provided, it is only the minimum size. If we
* need additional chunks, we need to allocate a new struct.
*/
if (abd != NULL &&
offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]) >
sizeof (abd_t)) {
abd = NULL;
}
if (abd == NULL)
abd = abd_alloc_struct(chunkcnt << PAGE_SHIFT);
/*
* Even if this buf is filesystem metadata, we only track that
* if we own the underlying data buffer, which is not true in
* this case. Therefore, we don't ever use ABD_FLAG_META here.
*/
ABD_SCATTER(abd).abd_offset = new_offset & PAGE_MASK;
/* Copy the scatterlist starting at the correct offset */
(void) memcpy(&ABD_SCATTER(abd).abd_chunks,
&ABD_SCATTER(sabd).abd_chunks[new_offset >> PAGE_SHIFT],
chunkcnt * sizeof (void *));
return (abd);
}
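/*
 * Standalone sketch of the offset arithmetic used by abd_get_offset_scatter()
 * above: a byte offset into the chunk array splits into a chunk index
 * (offset >> PAGE_SHIFT) and an intra-page remainder (offset & PAGE_MASK).
 * The 4 KB page below is an assumption for illustration; the kernel uses the
 * real PAGE_SHIFT/PAGE_MASK.
 */
#include <stdio.h>

#define	EX_PAGE_SHIFT	12
#define	EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)
#define	EX_PAGE_MASK	(EX_PAGE_SIZE - 1)

int
main(void)
{
	unsigned long offsets[] = { 0, 100, 4096, 6000, 12345 };
	unsigned long i;

	for (i = 0; i < sizeof (offsets) / sizeof (offsets[0]); i++) {
		unsigned long off = offsets[i];
		printf("offset %6lu -> chunk %2lu, in-page offset %4lu\n",
		    off, off >> EX_PAGE_SHIFT, off & EX_PAGE_MASK);
	}
	return (0);
}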
/*
* Initialize the abd_iter.
*/
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
ASSERT(!abd_is_gang(abd));
abd_verify(abd);
aiter->iter_abd = abd;
aiter->iter_pos = 0;
aiter->iter_mapaddr = NULL;
aiter->iter_mapsize = 0;
}
/*
* This is just a helper function to see if we have exhausted the
* abd_iter and reached the end.
*/
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
return (aiter->iter_pos == aiter->iter_abd->abd_size);
}
/*
* Advance the iterator by a certain amount. Cannot be called when a chunk is
* in use. This can be safely called when the aiter has already been exhausted,
* in which case this does nothing.
*/
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
ASSERT3P(aiter->iter_mapaddr, ==, NULL);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to advance to, so do nothing */
if (abd_iter_at_end(aiter))
return;
aiter->iter_pos += amount;
}
/*
* Map the current chunk into aiter. This can be safely called when the aiter
* has already been exhausted, in which case this does nothing.
*/
void
abd_iter_map(struct abd_iter *aiter)
{
void *paddr;
ASSERT3P(aiter->iter_mapaddr, ==, NULL);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to iterate over, so do nothing */
if (abd_iter_at_end(aiter))
return;
abd_t *abd = aiter->iter_abd;
size_t offset = aiter->iter_pos;
if (abd_is_linear(abd)) {
aiter->iter_mapsize = abd->abd_size - offset;
paddr = ABD_LINEAR_BUF(abd);
} else {
offset += ABD_SCATTER(abd).abd_offset;
paddr = ABD_SCATTER(abd).abd_chunks[offset >> PAGE_SHIFT];
offset &= PAGE_MASK;
aiter->iter_mapsize = MIN(PAGE_SIZE - offset,
abd->abd_size - aiter->iter_pos);
}
aiter->iter_mapaddr = (char *)paddr + offset;
}
/*
* Unmap the current chunk from aiter. This can be safely called when the aiter
* has already been exhausted, in which case this does nothing.
*/
void
abd_iter_unmap(struct abd_iter *aiter)
{
if (!abd_iter_at_end(aiter)) {
ASSERT3P(aiter->iter_mapaddr, !=, NULL);
ASSERT3U(aiter->iter_mapsize, >, 0);
}
aiter->iter_mapaddr = NULL;
aiter->iter_mapsize = 0;
}
void
abd_cache_reap_now(void)
{
kmem_cache_reap_soon(abd_chunk_cache);
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
index 6ac37da1c58a..4ffa21495e74 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
@@ -1,1321 +1,1325 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
*/
#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_os.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <vm/vm_page.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <geom/geom_int.h>
#ifndef g_topology_locked
#define g_topology_locked() sx_xlocked(&topology_lock)
#endif
/*
* Virtual device vector for GEOM.
*/
static g_attrchanged_t vdev_geom_attrchanged;
struct g_class zfs_vdev_class = {
.name = "ZFS::VDEV",
.version = G_VERSION,
.attrchanged = vdev_geom_attrchanged,
};
struct consumer_vdev_elem {
SLIST_ENTRY(consumer_vdev_elem) elems;
vdev_t *vd;
};
SLIST_HEAD(consumer_priv_t, consumer_vdev_elem);
/* BEGIN CSTYLED */
_Static_assert(sizeof (((struct g_consumer *)NULL)->private)
== sizeof (struct consumer_priv_t*),
"consumer_priv_t* can't be stored in g_consumer.private");
DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);
SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RWTUN,
&vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RWTUN,
&vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");
/* END CSTYLED */
/* Declare local functions */
static void vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read);
/*
* Thread local storage used to indicate when a thread is probing geoms
* for their guids. If NULL, this thread is not tasting geoms. If non-NULL,
* it is looking for a replacement for the vdev_t* that is its value.
*/
uint_t zfs_geom_probe_vdev_key;
static void
vdev_geom_set_physpath(vdev_t *vd, struct g_consumer *cp,
boolean_t do_null_update)
{
boolean_t needs_update = B_FALSE;
char *physpath;
int error, physpath_len;
physpath_len = MAXPATHLEN;
physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
if (error == 0) {
char *old_physpath;
/* g_topology lock ensures that vdev has not been closed */
g_topology_assert();
old_physpath = vd->vdev_physpath;
vd->vdev_physpath = spa_strdup(physpath);
if (old_physpath != NULL) {
needs_update = (strcmp(old_physpath,
vd->vdev_physpath) != 0);
spa_strfree(old_physpath);
} else
needs_update = do_null_update;
}
g_free(physpath);
/*
* If the physical path changed, update the config.
* Only request an update for previously unset physpaths if
* requested by the caller.
*/
if (needs_update)
spa_async_request(vd->vdev_spa, SPA_ASYNC_CONFIG_UPDATE);
}
static void
vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
{
struct consumer_priv_t *priv;
struct consumer_vdev_elem *elem;
priv = (struct consumer_priv_t *)&cp->private;
if (SLIST_EMPTY(priv))
return;
SLIST_FOREACH(elem, priv, elems) {
vdev_t *vd = elem->vd;
if (strcmp(attr, "GEOM::physpath") == 0) {
vdev_geom_set_physpath(vd, cp, /* null_update */B_TRUE);
return;
}
}
}
static void
vdev_geom_resize(struct g_consumer *cp)
{
struct consumer_priv_t *priv;
struct consumer_vdev_elem *elem;
spa_t *spa;
vdev_t *vd;
priv = (struct consumer_priv_t *)&cp->private;
if (SLIST_EMPTY(priv))
return;
SLIST_FOREACH(elem, priv, elems) {
vd = elem->vd;
if (vd->vdev_state != VDEV_STATE_HEALTHY)
continue;
spa = vd->vdev_spa;
if (!spa->spa_autoexpand)
continue;
vdev_online(spa, vd->vdev_guid, ZFS_ONLINE_EXPAND, NULL);
}
}
static void
vdev_geom_orphan(struct g_consumer *cp)
{
struct consumer_priv_t *priv;
// cppcheck-suppress uninitvar
struct consumer_vdev_elem *elem;
g_topology_assert();
priv = (struct consumer_priv_t *)&cp->private;
if (SLIST_EMPTY(priv))
/* Vdev close in progress. Ignore the event. */
return;
/*
* Orphan callbacks occur from the GEOM event thread.
* Concurrent with this call, new I/O requests may be
* working their way through GEOM about to find out
* (only once executed by the g_down thread) that we've
* been orphaned from our disk provider. These I/Os
* must be retired before we can detach our consumer.
* This is most easily achieved by acquiring the
* SPA ZIO configuration lock as a writer, but doing
* so with the GEOM topology lock held would cause
* a lock order reversal. Instead, rely on the SPA's
* async removal support to invoke a close on this
* vdev once it is safe to do so.
*/
// cppcheck-suppress All
SLIST_FOREACH(elem, priv, elems) {
// cppcheck-suppress uninitvar
vdev_t *vd = elem->vd;
vd->vdev_remove_wanted = B_TRUE;
spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
}
}
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp, vdev_t *vd, boolean_t sanity)
{
struct g_geom *gp;
struct g_consumer *cp;
int error;
g_topology_assert();
ZFS_LOG(1, "Attaching to %s.", pp->name);
if (sanity) {
if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize)) {
ZFS_LOG(1, "Failing attach of %s. "
"Incompatible sectorsize %d\n",
pp->name, pp->sectorsize);
return (NULL);
} else if (pp->mediasize < SPA_MINDEVSIZE) {
ZFS_LOG(1, "Failing attach of %s. "
"Incompatible mediasize %ju\n",
pp->name, pp->mediasize);
return (NULL);
}
}
/* Do we have geom already? No? Create one. */
LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
if (gp->flags & G_GEOM_WITHER)
continue;
if (strcmp(gp->name, "zfs::vdev") != 0)
continue;
break;
}
if (gp == NULL) {
gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
gp->orphan = vdev_geom_orphan;
gp->attrchanged = vdev_geom_attrchanged;
gp->resize = vdev_geom_resize;
cp = g_new_consumer(gp);
error = g_attach(cp, pp);
if (error != 0) {
ZFS_LOG(1, "%s(%d): g_attach failed: %d\n", __func__,
__LINE__, error);
vdev_geom_detach(cp, B_FALSE);
return (NULL);
}
error = g_access(cp, 1, 0, 1);
if (error != 0) {
ZFS_LOG(1, "%s(%d): g_access failed: %d\n", __func__,
__LINE__, error);
vdev_geom_detach(cp, B_FALSE);
return (NULL);
}
ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
} else {
/* Check if we are already connected to this provider. */
LIST_FOREACH(cp, &gp->consumer, consumer) {
if (cp->provider == pp) {
ZFS_LOG(1, "Found consumer for %s.", pp->name);
break;
}
}
if (cp == NULL) {
cp = g_new_consumer(gp);
error = g_attach(cp, pp);
if (error != 0) {
ZFS_LOG(1, "%s(%d): g_attach failed: %d\n",
__func__, __LINE__, error);
vdev_geom_detach(cp, B_FALSE);
return (NULL);
}
error = g_access(cp, 1, 0, 1);
if (error != 0) {
ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
__func__, __LINE__, error);
vdev_geom_detach(cp, B_FALSE);
return (NULL);
}
ZFS_LOG(1, "Created consumer for %s.", pp->name);
} else {
error = g_access(cp, 1, 0, 1);
if (error != 0) {
ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
__func__, __LINE__, error);
return (NULL);
}
ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
}
}
if (vd != NULL)
vd->vdev_tsd = cp;
cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
return (cp);
}
static void
vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read)
{
struct g_geom *gp;
g_topology_assert();
ZFS_LOG(1, "Detaching from %s.",
cp->provider && cp->provider->name ? cp->provider->name : "NULL");
gp = cp->geom;
if (open_for_read)
g_access(cp, -1, 0, -1);
/* Destroy consumer on last close. */
if (cp->acr == 0 && cp->ace == 0) {
if (cp->acw > 0)
g_access(cp, 0, -cp->acw, 0);
if (cp->provider != NULL) {
ZFS_LOG(1, "Destroying consumer for %s.",
cp->provider->name ? cp->provider->name : "NULL");
g_detach(cp);
}
g_destroy_consumer(cp);
}
/* Destroy geom if there are no consumers left. */
if (LIST_EMPTY(&gp->consumer)) {
ZFS_LOG(1, "Destroyed geom %s.", gp->name);
g_wither_geom(gp, ENXIO);
}
}
static void
vdev_geom_close_locked(vdev_t *vd)
{
struct g_consumer *cp;
struct consumer_priv_t *priv;
struct consumer_vdev_elem *elem, *elem_temp;
g_topology_assert();
cp = vd->vdev_tsd;
vd->vdev_delayed_close = B_FALSE;
if (cp == NULL)
return;
ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
KASSERT(cp->private != NULL, ("%s: cp->private is NULL", __func__));
priv = (struct consumer_priv_t *)&cp->private;
vd->vdev_tsd = NULL;
SLIST_FOREACH_SAFE(elem, priv, elems, elem_temp) {
if (elem->vd == vd) {
SLIST_REMOVE(priv, elem, consumer_vdev_elem, elems);
g_free(elem);
}
}
vdev_geom_detach(cp, B_TRUE);
}
/*
 * Issue one or more bios to the vdev in parallel.
 * cmds, datas, offsets, errors, and sizes are arrays of length ncmds. Each I/O
 * operation is described by parallel entries from each array. There may be
 * more bios actually issued than entries in the arrays.
*/
static void
vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
off_t *sizes, int *errors, int ncmds)
{
struct bio **bios;
uint8_t *p;
off_t off, maxio, s, end;
int i, n_bios, j;
size_t bios_size;
#if __FreeBSD_version > 1300130
maxio = maxphys - (maxphys % cp->provider->sectorsize);
#else
maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
#endif
n_bios = 0;
/* How many bios are required for all commands ? */
for (i = 0; i < ncmds; i++)
n_bios += (sizes[i] + maxio - 1) / maxio;
/* Allocate memory for the bios */
bios_size = n_bios * sizeof (struct bio *);
bios = kmem_zalloc(bios_size, KM_SLEEP);
/* Prepare and issue all of the bios */
for (i = j = 0; i < ncmds; i++) {
off = offsets[i];
p = datas[i];
s = sizes[i];
end = off + s;
ASSERT0(off % cp->provider->sectorsize);
ASSERT0(s % cp->provider->sectorsize);
for (; off < end; off += maxio, p += maxio, s -= maxio, j++) {
bios[j] = g_alloc_bio();
bios[j]->bio_cmd = cmds[i];
bios[j]->bio_done = NULL;
bios[j]->bio_offset = off;
bios[j]->bio_length = MIN(s, maxio);
bios[j]->bio_data = (caddr_t)p;
g_io_request(bios[j], cp);
}
}
ASSERT3S(j, ==, n_bios);
/* Wait for all of the bios to complete, and clean them up */
for (i = j = 0; i < ncmds; i++) {
off = offsets[i];
s = sizes[i];
end = off + s;
for (; off < end; off += maxio, s -= maxio, j++) {
errors[i] = biowait(bios[j], "vdev_geom_io") ||
errors[i];
g_destroy_bio(bios[j]);
}
}
kmem_free(bios, bios_size);
}
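/*
 * Illustrative sketch (not part of this change): vdev_geom_io() splits each
 * command into bios of at most "maxio" bytes, using a ceiling division for
 * the bio count and MIN(remaining, maxio) for each chunk length.  The
 * standalone userland function below mirrors only that arithmetic so it can
 * be inspected (or unit-tested) outside the kernel; print_chunks() is a
 * hypothetical name that exists only in this sketch.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>

static void
print_chunks(off_t offset, off_t size, off_t maxio)
{
	off_t off, s, end = offset + size;
	int nchunks = (size + maxio - 1) / maxio;	/* ceiling division */

	printf("%d chunk(s) required\n", nchunks);
	for (off = offset, s = size; off < end; off += maxio, s -= maxio) {
		off_t len = (s < maxio) ? s : maxio;	/* MIN(s, maxio) */
		printf("  offset=%jd length=%jd\n", (intmax_t)off,
		    (intmax_t)len);
	}
}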
/*
* Read the vdev config from a device. Return the number of valid labels that
* were found. The vdev config will be returned in config if and only if at
* least one valid label was found.
*/
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **configp)
{
struct g_provider *pp;
nvlist_t *config;
vdev_phys_t *vdev_lists[VDEV_LABELS];
char *buf;
size_t buflen;
uint64_t psize, state, txg;
off_t offsets[VDEV_LABELS];
off_t size;
off_t sizes[VDEV_LABELS];
int cmds[VDEV_LABELS];
int errors[VDEV_LABELS];
int l, nlabels;
g_topology_assert_not();
pp = cp->provider;
ZFS_LOG(1, "Reading config from %s...", pp->name);
psize = pp->mediasize;
psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
size = sizeof (*vdev_lists[0]) + pp->sectorsize -
((sizeof (*vdev_lists[0]) - 1) % pp->sectorsize) - 1;
buflen = sizeof (vdev_lists[0]->vp_nvlist);
/* Create all of the IO requests */
for (l = 0; l < VDEV_LABELS; l++) {
cmds[l] = BIO_READ;
vdev_lists[l] = kmem_alloc(size, KM_SLEEP);
offsets[l] = vdev_label_offset(psize, l, 0) + VDEV_SKIP_SIZE;
sizes[l] = size;
errors[l] = 0;
ASSERT0(offsets[l] % pp->sectorsize);
}
/* Issue the IO requests */
vdev_geom_io(cp, cmds, (void**)vdev_lists, offsets, sizes, errors,
VDEV_LABELS);
/* Parse the labels */
config = *configp = NULL;
nlabels = 0;
for (l = 0; l < VDEV_LABELS; l++) {
if (errors[l] != 0)
continue;
buf = vdev_lists[l]->vp_nvlist;
if (nvlist_unpack(buf, buflen, &config, 0) != 0)
continue;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(config);
continue;
}
if (state != POOL_STATE_SPARE &&
state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(config);
continue;
}
if (*configp != NULL)
nvlist_free(*configp);
*configp = config;
nlabels++;
}
/* Free the label storage */
for (l = 0; l < VDEV_LABELS; l++)
kmem_free(vdev_lists[l], size);
return (nlabels);
}
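/*
 * Illustrative sketch (not part of this change): the "psize" and "size"
 * computations above round the media size down to a multiple of
 * sizeof (vdev_label_t) and round sizeof (vdev_phys_t) up to a whole number
 * of sectors.  The two helpers below restate those roundings in plain C so
 * the intent is easy to check; round_down_to() and round_up_to() are
 * hypothetical names used only in this sketch.
 */
#include <stdint.h>

static uint64_t
round_down_to(uint64_t value, uint64_t align)
{
	/* Same result as P2ALIGN() when align is a power of two. */
	return (value - (value % align));
}

static uint64_t
round_up_to(uint64_t value, uint64_t align)
{
	/*
	 * For value >= 1, same result as the expression
	 * "value + align - ((value - 1) % align) - 1" used above.
	 */
	return (((value + align - 1) / align) * align);
}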
static void
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
{
nvlist_t **new_configs;
uint64_t i;
if (id < *count)
return;
new_configs = kmem_zalloc((id + 1) * sizeof (nvlist_t *),
KM_SLEEP);
for (i = 0; i < *count; i++)
new_configs[i] = (*configs)[i];
if (*configs != NULL)
kmem_free(*configs, *count * sizeof (void *));
*configs = new_configs;
*count = id + 1;
}
static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
const char *name, uint64_t *known_pool_guid)
{
nvlist_t *vdev_tree;
uint64_t pool_guid;
uint64_t vdev_guid;
uint64_t id, txg, known_txg;
char *pname;
if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
strcmp(pname, name) != 0)
goto ignore;
if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
goto ignore;
if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
goto ignore;
if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
goto ignore;
if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
goto ignore;
txg = fnvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG);
if (*known_pool_guid != 0) {
if (pool_guid != *known_pool_guid)
goto ignore;
} else
*known_pool_guid = pool_guid;
resize_configs(configs, count, id);
if ((*configs)[id] != NULL) {
known_txg = fnvlist_lookup_uint64((*configs)[id],
ZPOOL_CONFIG_POOL_TXG);
if (txg <= known_txg)
goto ignore;
nvlist_free((*configs)[id]);
}
(*configs)[id] = cfg;
return;
ignore:
nvlist_free(cfg);
}
int
vdev_geom_read_pool_label(const char *name,
nvlist_t ***configs, uint64_t *count)
{
struct g_class *mp;
struct g_geom *gp;
struct g_provider *pp;
struct g_consumer *zcp;
nvlist_t *vdev_cfg;
uint64_t pool_guid;
int nlabels;
DROP_GIANT();
g_topology_lock();
*configs = NULL;
*count = 0;
pool_guid = 0;
LIST_FOREACH(mp, &g_classes, class) {
if (mp == &zfs_vdev_class)
continue;
LIST_FOREACH(gp, &mp->geom, geom) {
if (gp->flags & G_GEOM_WITHER)
continue;
LIST_FOREACH(pp, &gp->provider, provider) {
if (pp->flags & G_PF_WITHER)
continue;
zcp = vdev_geom_attach(pp, NULL, B_TRUE);
if (zcp == NULL)
continue;
g_topology_unlock();
nlabels = vdev_geom_read_config(zcp, &vdev_cfg);
g_topology_lock();
vdev_geom_detach(zcp, B_TRUE);
if (nlabels == 0)
continue;
ZFS_LOG(1, "successfully read vdev config");
process_vdev_config(configs, count,
vdev_cfg, name, &pool_guid);
}
}
}
g_topology_unlock();
PICKUP_GIANT();
return (*count > 0 ? 0 : ENOENT);
}
enum match {
NO_MATCH = 0, /* No matching labels found */
TOPGUID_MATCH = 1, /* Labels match top guid, not vdev guid */
ZERO_MATCH = 1, /* Should never be returned */
ONE_MATCH = 2, /* 1 label matching the vdev_guid */
TWO_MATCH = 3, /* 2 labels matching the vdev_guid */
THREE_MATCH = 4, /* 3 labels matching the vdev_guid */
FULL_MATCH = 5 /* all labels match the vdev_guid */
};
static enum match
vdev_attach_ok(vdev_t *vd, struct g_provider *pp)
{
nvlist_t *config;
uint64_t pool_guid, top_guid, vdev_guid;
struct g_consumer *cp;
int nlabels;
cp = vdev_geom_attach(pp, NULL, B_TRUE);
if (cp == NULL) {
ZFS_LOG(1, "Unable to attach tasting instance to %s.",
pp->name);
return (NO_MATCH);
}
g_topology_unlock();
nlabels = vdev_geom_read_config(cp, &config);
g_topology_lock();
vdev_geom_detach(cp, B_TRUE);
if (nlabels == 0) {
ZFS_LOG(1, "Unable to read config from %s.", pp->name);
return (NO_MATCH);
}
pool_guid = 0;
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid);
top_guid = 0;
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID, &top_guid);
vdev_guid = 0;
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid);
nvlist_free(config);
/*
* Check that the label's pool guid matches the desired guid.
* Inactive spares and L2ARCs do not have any pool guid in the label.
*/
if (pool_guid != 0 && pool_guid != spa_guid(vd->vdev_spa)) {
ZFS_LOG(1, "pool guid mismatch for provider %s: %ju != %ju.",
pp->name,
(uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)pool_guid);
return (NO_MATCH);
}
/*
* Check that the label's vdev guid matches the desired guid.
* The second condition handles possible race on vdev detach, when
* remaining vdev receives GUID of destroyed top level mirror vdev.
*/
if (vdev_guid == vd->vdev_guid) {
ZFS_LOG(1, "guids match for provider %s.", pp->name);
return (ZERO_MATCH + nlabels);
} else if (top_guid == vd->vdev_guid && vd == vd->vdev_top) {
ZFS_LOG(1, "top vdev guid match for provider %s.", pp->name);
return (TOPGUID_MATCH);
}
ZFS_LOG(1, "vdev guid mismatch for provider %s: %ju != %ju.",
pp->name, (uintmax_t)vd->vdev_guid, (uintmax_t)vdev_guid);
return (NO_MATCH);
}
static struct g_consumer *
vdev_geom_attach_by_guids(vdev_t *vd)
{
struct g_class *mp;
struct g_geom *gp;
struct g_provider *pp, *best_pp;
struct g_consumer *cp;
const char *vdpath;
enum match match, best_match;
g_topology_assert();
vdpath = vd->vdev_path + sizeof ("/dev/") - 1;
cp = NULL;
best_pp = NULL;
best_match = NO_MATCH;
LIST_FOREACH(mp, &g_classes, class) {
if (mp == &zfs_vdev_class)
continue;
LIST_FOREACH(gp, &mp->geom, geom) {
if (gp->flags & G_GEOM_WITHER)
continue;
LIST_FOREACH(pp, &gp->provider, provider) {
match = vdev_attach_ok(vd, pp);
if (match > best_match) {
best_match = match;
best_pp = pp;
} else if (match == best_match) {
if (strcmp(pp->name, vdpath) == 0) {
best_pp = pp;
}
}
if (match == FULL_MATCH)
goto out;
}
}
}
out:
if (best_pp) {
cp = vdev_geom_attach(best_pp, vd, B_TRUE);
if (cp == NULL) {
printf("ZFS WARNING: Unable to attach to %s.\n",
best_pp->name);
}
}
return (cp);
}
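/*
 * Illustrative sketch (not part of this change): the search above keeps the
 * provider with the highest "enum match" score and, on a tie, prefers the
 * candidate whose name equals the vdev's recorded path.  The standalone
 * helper below reproduces that selection rule over a plain array of scored
 * names; struct candidate and pick_best() are hypothetical names used only
 * in this sketch.
 */
#include <stddef.h>
#include <string.h>

struct candidate {
	const char	*name;
	int		 score;		/* analogous to enum match */
};

static const struct candidate *
pick_best(const struct candidate *c, size_t n, const char *recorded_path)
{
	const struct candidate *best = NULL;
	int best_score = 0;		/* analogous to NO_MATCH */

	for (size_t i = 0; i < n; i++) {
		if (c[i].score > best_score) {
			best_score = c[i].score;
			best = &c[i];
		} else if (c[i].score == best_score &&
		    strcmp(c[i].name, recorded_path) == 0) {
			/* Tie: prefer the candidate at the recorded path. */
			best = &c[i];
		}
	}
	return (best);
}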
static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
{
struct g_consumer *cp;
char *buf;
size_t len;
g_topology_assert();
ZFS_LOG(1, "Searching by guids [%ju:%ju].",
(uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)vd->vdev_guid);
cp = vdev_geom_attach_by_guids(vd);
if (cp != NULL) {
len = strlen(cp->provider->name) + strlen("/dev/") + 1;
buf = kmem_alloc(len, KM_SLEEP);
snprintf(buf, len, "/dev/%s", cp->provider->name);
spa_strfree(vd->vdev_path);
vd->vdev_path = buf;
ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
(uintmax_t)spa_guid(vd->vdev_spa),
(uintmax_t)vd->vdev_guid, cp->provider->name);
} else {
ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
(uintmax_t)spa_guid(vd->vdev_spa),
(uintmax_t)vd->vdev_guid);
}
return (cp);
}
static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
struct g_provider *pp;
struct g_consumer *cp;
g_topology_assert();
cp = NULL;
pp = g_provider_by_name(vd->vdev_path + sizeof ("/dev/") - 1);
if (pp != NULL) {
ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
if (!check_guid || vdev_attach_ok(vd, pp) == FULL_MATCH)
cp = vdev_geom_attach(pp, vd, B_FALSE);
}
return (cp);
}
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
struct g_provider *pp;
struct g_consumer *cp;
int error, has_trim;
uint16_t rate;
/*
* Set the TLS to indicate downstack that we
* should not access zvols
*/
VERIFY0(tsd_set(zfs_geom_probe_vdev_key, vd));
/*
* We must have a pathname, and it must be absolute.
*/
if (vd->vdev_path == NULL || strncmp(vd->vdev_path, "/dev/", 5) != 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
}
/*
* Reopen the device if it's not currently open. Otherwise,
* just update the physical size of the device.
*/
if ((cp = vd->vdev_tsd) != NULL) {
ASSERT(vd->vdev_reopening);
goto skip_open;
}
DROP_GIANT();
g_topology_lock();
error = 0;
if (vd->vdev_spa->spa_is_splitting ||
((vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
(vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
vd->vdev_spa->spa_load_state == SPA_LOAD_CREATE)))) {
/*
* We are dealing with a vdev that hasn't been previously
* opened (since boot), and we are not loading an
* existing pool configuration. This looks like a
* vdev add operation to a new or existing pool.
* Assume the user really wants to do this, and find
* GEOM provider by its name, ignoring GUID mismatches.
*
* XXPOLICY: It would be safer to only allow a device
* that is unlabeled or labeled but missing
* GUID information to be opened in this fashion,
* unless we are doing a split, in which case we
* should allow any guid.
*/
cp = vdev_geom_open_by_path(vd, 0);
} else {
/*
* Try using the recorded path for this device, but only
* accept it if its label data contains the expected GUIDs.
*/
cp = vdev_geom_open_by_path(vd, 1);
if (cp == NULL) {
/*
* The device at vd->vdev_path doesn't have the
* expected GUIDs. The disks might have merely
* moved around so try all other GEOM providers
* to find one with the right GUIDs.
*/
cp = vdev_geom_open_by_guids(vd);
}
}
/* Clear the TLS now that tasting is done */
VERIFY0(tsd_set(zfs_geom_probe_vdev_key, NULL));
if (cp == NULL) {
ZFS_LOG(1, "Vdev %s not found.", vd->vdev_path);
error = ENOENT;
} else {
struct consumer_priv_t *priv;
struct consumer_vdev_elem *elem;
int spamode;
priv = (struct consumer_priv_t *)&cp->private;
if (cp->private == NULL)
SLIST_INIT(priv);
elem = g_malloc(sizeof (*elem), M_WAITOK|M_ZERO);
elem->vd = vd;
SLIST_INSERT_HEAD(priv, elem, elems);
spamode = spa_mode(vd->vdev_spa);
if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
!ISP2(cp->provider->sectorsize)) {
ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
cp->provider->name);
vdev_geom_close_locked(vd);
error = EINVAL;
cp = NULL;
} else if (cp->acw == 0 && (spamode & FWRITE) != 0) {
int i;
for (i = 0; i < 5; i++) {
error = g_access(cp, 0, 1, 0);
if (error == 0)
break;
g_topology_unlock();
tsleep(vd, 0, "vdev", hz / 2);
g_topology_lock();
}
if (error != 0) {
printf("ZFS WARNING: Unable to open %s for "
"writing (error=%d).\n",
cp->provider->name, error);
vdev_geom_close_locked(vd);
cp = NULL;
}
}
}
/* Fetch initial physical path information for this device. */
if (cp != NULL) {
vdev_geom_attrchanged(cp, "GEOM::physpath");
/* Set other GEOM characteristics */
vdev_geom_set_physpath(vd, cp, /* do_null_update */B_FALSE);
}
g_topology_unlock();
PICKUP_GIANT();
if (cp == NULL) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
vdev_dbgmsg(vd, "vdev_geom_open: failed to open [error=%d]",
error);
return (error);
}
skip_open:
pp = cp->provider;
/*
* Determine the actual size of the device.
*/
*max_psize = *psize = pp->mediasize;
/*
* Determine the device's minimum transfer size and preferred
* transfer size.
*/
*logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
*physical_ashift = 0;
if (pp->stripesize && pp->stripesize > (1 << *logical_ashift) &&
ISP2(pp->stripesize) && pp->stripesize <= (1 << ASHIFT_MAX) &&
pp->stripeoffset == 0)
*physical_ashift = highbit(pp->stripesize) - 1;
/*
* Clear the nowritecache settings, so that on a vdev_reopen()
* we will try again.
*/
vd->vdev_nowritecache = B_FALSE;
/* Inform the ZIO pipeline that we are non-rotational. */
error = g_getattr("GEOM::rotation_rate", cp, &rate);
if (error == 0 && rate == DISK_RR_NON_ROTATING)
vd->vdev_nonrot = B_TRUE;
else
vd->vdev_nonrot = B_FALSE;
/* Set when device reports it supports TRIM. */
error = g_getattr("GEOM::candelete", cp, &has_trim);
vd->vdev_has_trim = (error == 0 && has_trim);
/* Set when device reports it supports secure TRIM. */
/* unavailable on FreeBSD */
vd->vdev_has_securetrim = B_FALSE;
return (0);
}
static void
vdev_geom_close(vdev_t *vd)
{
struct g_consumer *cp;
boolean_t locked;
cp = vd->vdev_tsd;
DROP_GIANT();
locked = g_topology_locked();
if (!locked)
g_topology_lock();
if (!vd->vdev_reopening ||
(cp != NULL && ((cp->flags & G_CF_ORPHAN) != 0 ||
(cp->provider != NULL && cp->provider->error != 0))))
vdev_geom_close_locked(vd);
if (!locked)
g_topology_unlock();
PICKUP_GIANT();
}
static void
vdev_geom_io_intr(struct bio *bp)
{
vdev_t *vd;
zio_t *zio;
zio = bp->bio_caller1;
vd = zio->io_vd;
zio->io_error = bp->bio_error;
if (zio->io_error == 0 && bp->bio_resid != 0)
zio->io_error = SET_ERROR(EIO);
switch (zio->io_error) {
case ENOTSUP:
/*
* If we get ENOTSUP for BIO_FLUSH or BIO_DELETE we know
* that future attempts will never succeed. In this case
* we set a persistent flag so that we don't bother with
* requests in the future.
*/
switch (bp->bio_cmd) {
case BIO_FLUSH:
vd->vdev_nowritecache = B_TRUE;
break;
case BIO_DELETE:
break;
}
break;
case ENXIO:
if (!vd->vdev_remove_wanted) {
/*
* If provider's error is set we assume it is being
* removed.
*/
if (bp->bio_to->error != 0) {
vd->vdev_remove_wanted = B_TRUE;
spa_async_request(zio->io_spa,
SPA_ASYNC_REMOVE);
} else if (!vd->vdev_delayed_close) {
vd->vdev_delayed_close = B_TRUE;
}
}
break;
}
/*
* We have to split bio freeing into two parts, because the ABD code
* cannot be called in this context and vdev_op_io_done is not called
* for ZIO_TYPE_IOCTL zio-s.
*/
if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
g_destroy_bio(bp);
zio->io_bio = NULL;
}
zio_delay_interrupt(zio);
}
struct vdev_geom_check_unmapped_cb_state {
int pages;
uint_t end;
};
/*
* Callback to check the ABD segment size/alignment and count the pages.
 * GEOM requires the data buffer to look virtually contiguous. That means
 * only the first page of the buffer may start, and only the last may end,
 * off a page boundary; all other physical pages must be full.
*/
static int
vdev_geom_check_unmapped_cb(void *buf, size_t len, void *priv)
{
struct vdev_geom_check_unmapped_cb_state *s = priv;
vm_offset_t off = (vm_offset_t)buf & PAGE_MASK;
if (s->pages != 0 && off != 0)
return (1);
if (s->end != 0)
return (1);
s->end = (off + len) & PAGE_MASK;
s->pages += (off + len + PAGE_MASK) >> PAGE_SHIFT;
return (0);
}
/*
* Check whether we can use unmapped I/O for this ZIO on this device to
* avoid data copying between scattered and/or gang ABD buffer and linear.
*/
static int
vdev_geom_check_unmapped(zio_t *zio, struct g_consumer *cp)
{
struct vdev_geom_check_unmapped_cb_state s;
+ /* If unmapped I/O is administratively disabled, respect that. */
+ if (!unmapped_buf_allowed)
+ return (0);
+
/* If the buffer is already linear, then nothing to do here. */
if (abd_is_linear(zio->io_abd))
return (0);
/*
* If unmapped I/O is not supported by the GEOM provider,
* then we can't do anything and have to copy the data.
*/
if ((cp->provider->flags & G_PF_ACCEPT_UNMAPPED) == 0)
return (0);
/* Check the buffer chunks sizes/alignments and count pages. */
s.pages = s.end = 0;
if (abd_iterate_func(zio->io_abd, 0, zio->io_size,
vdev_geom_check_unmapped_cb, &s))
return (0);
return (s.pages);
}
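/*
 * Illustrative sketch (not part of this change): the callback-based check
 * above enforces GEOM's "virtually contiguous" rule -- only the first
 * segment may start mid-page and only the last may end mid-page -- while
 * counting the pages spanned.  The standalone version below applies the
 * same rule to a plain array of (pointer, length) segments; struct seg and
 * count_pages_if_contiguous() are hypothetical names for this sketch only.
 */
#include <stddef.h>
#include <stdint.h>

#define	SKETCH_PAGE_SIZE	4096UL
#define	SKETCH_PAGE_MASK	(SKETCH_PAGE_SIZE - 1)
#define	SKETCH_PAGE_SHIFT	12

struct seg {
	void	*buf;
	size_t	 len;
};

static int
count_pages_if_contiguous(const struct seg *segs, size_t nsegs)
{
	int pages = 0;
	uintptr_t end = 0;

	for (size_t i = 0; i < nsegs; i++) {
		uintptr_t off = (uintptr_t)segs[i].buf & SKETCH_PAGE_MASK;

		/* Segments after the first must start on a page boundary. */
		if (pages != 0 && off != 0)
			return (0);
		/* Once a segment ends mid-page, it must be the last one. */
		if (end != 0)
			return (0);
		end = (off + segs[i].len) & SKETCH_PAGE_MASK;
		pages += (off + segs[i].len + SKETCH_PAGE_MASK) >>
		    SKETCH_PAGE_SHIFT;
	}
	return (pages);	/* 0 means "not usable unmapped"; else page count */
}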
/*
* Callback to translate the ABD segment into array of physical pages.
*/
static int
vdev_geom_fill_unmap_cb(void *buf, size_t len, void *priv)
{
struct bio *bp = priv;
vm_offset_t addr = (vm_offset_t)buf;
vm_offset_t end = addr + len;
if (bp->bio_ma_n == 0)
bp->bio_ma_offset = addr & PAGE_MASK;
do {
bp->bio_ma[bp->bio_ma_n++] =
PHYS_TO_VM_PAGE(pmap_kextract(addr));
addr += PAGE_SIZE;
} while (addr < end);
return (0);
}
static void
vdev_geom_io_start(zio_t *zio)
{
vdev_t *vd;
struct g_consumer *cp;
struct bio *bp;
vd = zio->io_vd;
switch (zio->io_type) {
case ZIO_TYPE_IOCTL:
/* XXPOLICY */
if (!vdev_readable(vd)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
} else {
switch (zio->io_cmd) {
case DKIOCFLUSHWRITECACHE:
if (zfs_nocacheflush ||
vdev_geom_bio_flush_disable)
break;
if (vd->vdev_nowritecache) {
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
goto sendreq;
default:
zio->io_error = SET_ERROR(ENOTSUP);
}
}
zio_execute(zio);
return;
case ZIO_TYPE_TRIM:
if (!vdev_geom_bio_delete_disable) {
goto sendreq;
}
zio_execute(zio);
return;
default:
;
/* PASSTHROUGH --- placate compiler */
}
sendreq:
ASSERT(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE ||
zio->io_type == ZIO_TYPE_TRIM ||
zio->io_type == ZIO_TYPE_IOCTL);
cp = vd->vdev_tsd;
if (cp == NULL) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
}
bp = g_alloc_bio();
bp->bio_caller1 = zio;
switch (zio->io_type) {
case ZIO_TYPE_READ:
case ZIO_TYPE_WRITE:
zio->io_target_timestamp = zio_handle_io_delay(zio);
bp->bio_offset = zio->io_offset;
bp->bio_length = zio->io_size;
if (zio->io_type == ZIO_TYPE_READ)
bp->bio_cmd = BIO_READ;
else
bp->bio_cmd = BIO_WRITE;
/*
 * If possible, represent the scattered and/or gang ABD buffer to
 * GEOM as an array of physical pages. This satisfies GEOM's
 * requirement for a virtually contiguous buffer without copying.
*/
int pgs = vdev_geom_check_unmapped(zio, cp);
if (pgs > 0) {
bp->bio_ma = malloc(sizeof (struct vm_page *) * pgs,
M_DEVBUF, M_WAITOK);
bp->bio_ma_n = 0;
bp->bio_ma_offset = 0;
abd_iterate_func(zio->io_abd, 0, zio->io_size,
vdev_geom_fill_unmap_cb, bp);
bp->bio_data = unmapped_buf;
bp->bio_flags |= BIO_UNMAPPED;
} else {
if (zio->io_type == ZIO_TYPE_READ) {
bp->bio_data = abd_borrow_buf(zio->io_abd,
zio->io_size);
} else {
bp->bio_data = abd_borrow_buf_copy(zio->io_abd,
zio->io_size);
}
}
break;
case ZIO_TYPE_TRIM:
bp->bio_cmd = BIO_DELETE;
bp->bio_data = NULL;
bp->bio_offset = zio->io_offset;
bp->bio_length = zio->io_size;
break;
case ZIO_TYPE_IOCTL:
bp->bio_cmd = BIO_FLUSH;
bp->bio_data = NULL;
bp->bio_offset = cp->provider->mediasize;
bp->bio_length = 0;
break;
default:
panic("invalid zio->io_type: %d\n", zio->io_type);
}
bp->bio_done = vdev_geom_io_intr;
zio->io_bio = bp;
g_io_request(bp, cp);
}
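/*
 * Illustrative sketch (not part of this change): vdev_geom_io_start()
 * translates ZIO types into GEOM bio commands (READ -> BIO_READ,
 * WRITE -> BIO_WRITE, TRIM -> BIO_DELETE, flush IOCTL -> BIO_FLUSH).
 * The standalone lookup below restates that mapping; the enums and
 * zio_to_bio_cmd() are hypothetical names used only in this sketch.
 */
enum sketch_zio_type { SK_READ, SK_WRITE, SK_TRIM, SK_FLUSH };
enum sketch_bio_cmd  { SK_BIO_READ, SK_BIO_WRITE, SK_BIO_DELETE, SK_BIO_FLUSH };

static enum sketch_bio_cmd
zio_to_bio_cmd(enum sketch_zio_type t)
{
	switch (t) {
	case SK_READ:	return (SK_BIO_READ);
	case SK_WRITE:	return (SK_BIO_WRITE);
	case SK_TRIM:	return (SK_BIO_DELETE);
	case SK_FLUSH:	return (SK_BIO_FLUSH);
	}
	return (SK_BIO_READ);	/* unreachable for the values above */
}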
static void
vdev_geom_io_done(zio_t *zio)
{
struct bio *bp = zio->io_bio;
if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
ASSERT3P(bp, ==, NULL);
return;
}
if (bp == NULL) {
ASSERT3S(zio->io_error, ==, ENXIO);
return;
}
if (bp->bio_ma != NULL) {
free(bp->bio_ma, M_DEVBUF);
} else {
if (zio->io_type == ZIO_TYPE_READ) {
abd_return_buf_copy(zio->io_abd, bp->bio_data,
zio->io_size);
} else {
abd_return_buf(zio->io_abd, bp->bio_data,
zio->io_size);
}
}
g_destroy_bio(bp);
zio->io_bio = NULL;
}
static void
vdev_geom_hold(vdev_t *vd)
{
}
static void
vdev_geom_rele(vdev_t *vd)
{
}
vdev_ops_t vdev_disk_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_geom_open,
.vdev_op_close = vdev_geom_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_geom_io_start,
.vdev_op_io_done = vdev_geom_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = vdev_geom_hold,
.vdev_op_rele = vdev_geom_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
index 9b410863019e..9d42755b963b 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
@@ -1,2672 +1,2672 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/sdt.h>
#include <sys/fs/zfs.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <acl/acl_common.h>
#define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE
#define DENY ACE_ACCESS_DENIED_ACE_TYPE
#define MAX_ACE_TYPE ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE
#define MIN_ACE_TYPE ALLOW
#define OWNING_GROUP (ACE_GROUP|ACE_IDENTIFIER_GROUP)
#define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE)
#define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \
ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \
ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \
ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE)
#define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS)
#define WRITE_MASK_ATTRS (ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| \
ACE_DELETE|ACE_DELETE_CHILD)
#define WRITE_MASK (WRITE_MASK_DATA|WRITE_MASK_ATTRS)
#define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \
ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE)
#define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER)
#define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED|\
ZFS_ACL_PROTECTED)
#define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|\
ZFS_ACL_OBJ_ACE)
#define ALL_MODE_EXECS (S_IXUSR | S_IXGRP | S_IXOTH)
static uint16_t
zfs_ace_v0_get_type(void *acep)
{
return (((zfs_oldace_t *)acep)->z_type);
}
static uint16_t
zfs_ace_v0_get_flags(void *acep)
{
return (((zfs_oldace_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_v0_get_mask(void *acep)
{
return (((zfs_oldace_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_v0_get_who(void *acep)
{
return (((zfs_oldace_t *)acep)->z_fuid);
}
static void
zfs_ace_v0_set_type(void *acep, uint16_t type)
{
((zfs_oldace_t *)acep)->z_type = type;
}
static void
zfs_ace_v0_set_flags(void *acep, uint16_t flags)
{
((zfs_oldace_t *)acep)->z_flags = flags;
}
static void
zfs_ace_v0_set_mask(void *acep, uint32_t mask)
{
((zfs_oldace_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_v0_set_who(void *acep, uint64_t who)
{
((zfs_oldace_t *)acep)->z_fuid = who;
}
/*ARGSUSED*/
static size_t
zfs_ace_v0_size(void *acep)
{
return (sizeof (zfs_oldace_t));
}
static size_t
zfs_ace_v0_abstract_size(void)
{
return (sizeof (zfs_oldace_t));
}
static int
zfs_ace_v0_mask_off(void)
{
return (offsetof(zfs_oldace_t, z_access_mask));
}
/*ARGSUSED*/
static int
zfs_ace_v0_data(void *acep, void **datap)
{
*datap = NULL;
return (0);
}
static acl_ops_t zfs_acl_v0_ops = {
zfs_ace_v0_get_mask,
zfs_ace_v0_set_mask,
zfs_ace_v0_get_flags,
zfs_ace_v0_set_flags,
zfs_ace_v0_get_type,
zfs_ace_v0_set_type,
zfs_ace_v0_get_who,
zfs_ace_v0_set_who,
zfs_ace_v0_size,
zfs_ace_v0_abstract_size,
zfs_ace_v0_mask_off,
zfs_ace_v0_data
};
static uint16_t
zfs_ace_fuid_get_type(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_type);
}
static uint16_t
zfs_ace_fuid_get_flags(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_fuid_get_mask(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_fuid_get_who(void *args)
{
uint16_t entry_type;
zfs_ace_t *acep = args;
entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (-1);
return (((zfs_ace_t *)acep)->z_fuid);
}
static void
zfs_ace_fuid_set_type(void *acep, uint16_t type)
{
((zfs_ace_hdr_t *)acep)->z_type = type;
}
static void
zfs_ace_fuid_set_flags(void *acep, uint16_t flags)
{
((zfs_ace_hdr_t *)acep)->z_flags = flags;
}
static void
zfs_ace_fuid_set_mask(void *acep, uint32_t mask)
{
((zfs_ace_hdr_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_fuid_set_who(void *arg, uint64_t who)
{
zfs_ace_t *acep = arg;
uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return;
acep->z_fuid = who;
}
static size_t
zfs_ace_fuid_size(void *acep)
{
zfs_ace_hdr_t *zacep = acep;
uint16_t entry_type;
switch (zacep->z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
return (sizeof (zfs_object_ace_t));
case ALLOW:
case DENY:
entry_type =
(((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS);
if (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (sizeof (zfs_ace_hdr_t));
- /*FALLTHROUGH*/
+ /* FALLTHROUGH */
default:
return (sizeof (zfs_ace_t));
}
}
static size_t
zfs_ace_fuid_abstract_size(void)
{
return (sizeof (zfs_ace_hdr_t));
}
static int
zfs_ace_fuid_mask_off(void)
{
return (offsetof(zfs_ace_hdr_t, z_access_mask));
}
static int
zfs_ace_fuid_data(void *acep, void **datap)
{
zfs_ace_t *zacep = acep;
zfs_object_ace_t *zobjp;
switch (zacep->z_hdr.z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjp = acep;
*datap = (caddr_t)zobjp + sizeof (zfs_ace_t);
return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t));
default:
*datap = NULL;
return (0);
}
}
static acl_ops_t zfs_acl_fuid_ops = {
zfs_ace_fuid_get_mask,
zfs_ace_fuid_set_mask,
zfs_ace_fuid_get_flags,
zfs_ace_fuid_set_flags,
zfs_ace_fuid_get_type,
zfs_ace_fuid_set_type,
zfs_ace_fuid_get_who,
zfs_ace_fuid_set_who,
zfs_ace_fuid_size,
zfs_ace_fuid_abstract_size,
zfs_ace_fuid_mask_off,
zfs_ace_fuid_data
};
/*
 * The following three functions are provided for compatibility with
 * older ZPL versions in order to determine if the file used to have
 * an external ACL and which ACL version previously existed on the
 * file. Would really be nice to not need this, sigh.
*/
uint64_t
zfs_external_acl(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
int error;
if (zp->z_is_sa)
return (0);
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
 * z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_extern_obj);
else {
/*
 * After upgrade, the SA_ZPL_ZNODE_ACL should have been
 * removed.
*/
VERIFY(zp->z_is_sa);
VERIFY3S(error, ==, ENOENT);
return (0);
}
}
/*
* Determine size of ACL in bytes
*
* This is more complicated than it should be since we have to deal
* with old external ACLs.
*/
static int
zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount,
zfs_acl_phys_t *aclphys)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
uint64_t acl_count;
int size;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
&size)) != 0)
return (error);
*aclsize = size;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs),
&acl_count, sizeof (acl_count))) != 0)
return (error);
*aclcount = acl_count;
} else {
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
aclphys, sizeof (*aclphys))) != 0)
return (error);
if (aclphys->z_acl_version == ZFS_ACL_VERSION_INITIAL) {
*aclsize = ZFS_ACL_SIZE(aclphys->z_acl_size);
*aclcount = aclphys->z_acl_size;
} else {
*aclsize = aclphys->z_acl_size;
*aclcount = aclphys->z_acl_count;
}
}
return (0);
}
int
zfs_znode_acl_version(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
if (zp->z_is_sa)
return (ZFS_ACL_VERSION_FUID);
else {
int error;
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
 * z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_version);
else {
/*
* After upgrade SA_ZPL_ZNODE_ACL should have
* been removed.
*/
VERIFY(zp->z_is_sa);
VERIFY3S(error, ==, ENOENT);
return (ZFS_ACL_VERSION_FUID);
}
}
}
static int
zfs_acl_version(int version)
{
if (version < ZPL_VERSION_FUID)
return (ZFS_ACL_VERSION_INITIAL);
else
return (ZFS_ACL_VERSION_FUID);
}
static int
zfs_acl_version_zp(znode_t *zp)
{
return (zfs_acl_version(zp->z_zfsvfs->z_version));
}
zfs_acl_t *
zfs_acl_alloc(int vers)
{
zfs_acl_t *aclp;
aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP);
list_create(&aclp->z_acl, sizeof (zfs_acl_node_t),
offsetof(zfs_acl_node_t, z_next));
aclp->z_version = vers;
if (vers == ZFS_ACL_VERSION_FUID)
aclp->z_ops = &zfs_acl_fuid_ops;
else
aclp->z_ops = &zfs_acl_v0_ops;
return (aclp);
}
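/*
 * Illustrative sketch (not part of this change): zfs_acl_v0_ops and
 * zfs_acl_fuid_ops are per-version vtables, and zfs_acl_alloc() picks one
 * so the rest of the code can manipulate ACEs without caring about the
 * on-disk layout.  The standalone fragment below shows the same
 * function-pointer dispatch in miniature; struct fmt_ops, old_get(),
 * new_get() and read_mask() are hypothetical names for this sketch only.
 */
#include <stdint.h>

struct fmt_ops {
	uint32_t (*get_mask)(const void *entry);
};

static uint32_t old_get(const void *entry) { return (*(const uint16_t *)entry); }
static uint32_t new_get(const void *entry) { return (*(const uint32_t *)entry); }

static const struct fmt_ops old_ops = { old_get };
static const struct fmt_ops new_ops = { new_get };

/* Callers select a vtable once, then dispatch through it uniformly. */
static uint32_t
read_mask(const struct fmt_ops *ops, const void *entry)
{
	return (ops->get_mask(entry));
}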
zfs_acl_node_t *
zfs_acl_node_alloc(size_t bytes)
{
zfs_acl_node_t *aclnode;
aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP);
if (bytes) {
aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP);
aclnode->z_allocdata = aclnode->z_acldata;
aclnode->z_allocsize = bytes;
aclnode->z_size = bytes;
}
return (aclnode);
}
static void
zfs_acl_node_free(zfs_acl_node_t *aclnode)
{
if (aclnode->z_allocsize)
kmem_free(aclnode->z_allocdata, aclnode->z_allocsize);
kmem_free(aclnode, sizeof (zfs_acl_node_t));
}
static void
zfs_acl_release_nodes(zfs_acl_t *aclp)
{
zfs_acl_node_t *aclnode;
while ((aclnode = list_head(&aclp->z_acl))) {
list_remove(&aclp->z_acl, aclnode);
zfs_acl_node_free(aclnode);
}
aclp->z_acl_count = 0;
aclp->z_acl_bytes = 0;
}
void
zfs_acl_free(zfs_acl_t *aclp)
{
zfs_acl_release_nodes(aclp);
list_destroy(&aclp->z_acl);
kmem_free(aclp, sizeof (zfs_acl_t));
}
static boolean_t
zfs_acl_valid_ace_type(uint_t type, uint_t flags)
{
uint16_t entry_type;
switch (type) {
case ALLOW:
case DENY:
case ACE_SYSTEM_AUDIT_ACE_TYPE:
case ACE_SYSTEM_ALARM_ACE_TYPE:
entry_type = flags & ACE_TYPE_FLAGS;
return (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE || entry_type == 0 ||
entry_type == ACE_IDENTIFIER_GROUP);
default:
if (type >= MIN_ACE_TYPE && type <= MAX_ACE_TYPE)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
zfs_ace_valid(vtype_t obj_type, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
{
/*
* first check type of entry
*/
if (!zfs_acl_valid_ace_type(type, iflags))
return (B_FALSE);
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (aclp->z_version < ZFS_ACL_VERSION_FUID)
return (B_FALSE);
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
}
/*
* next check inheritance level flags
*/
if (obj_type == VDIR &&
(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) {
if ((iflags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE)) == 0) {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void *
zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who,
uint32_t *access_mask, uint16_t *iflags, uint16_t *type)
{
zfs_acl_node_t *aclnode;
ASSERT3P(aclp, !=, NULL);
if (start == NULL) {
aclnode = list_head(&aclp->z_acl);
if (aclnode == NULL)
return (NULL);
aclp->z_next_ace = aclnode->z_acldata;
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
}
aclnode = aclp->z_curr_node;
if (aclnode == NULL)
return (NULL);
if (aclnode->z_ace_idx >= aclnode->z_ace_count) {
aclnode = list_next(&aclp->z_acl, aclnode);
if (aclnode == NULL)
return (NULL);
else {
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
aclp->z_next_ace = aclnode->z_acldata;
}
}
if (aclnode->z_ace_idx < aclnode->z_ace_count) {
void *acep = aclp->z_next_ace;
size_t ace_size;
/*
* Make sure we don't overstep our bounds
*/
ace_size = aclp->z_ops->ace_size(acep);
if (((caddr_t)acep + ace_size) >
((caddr_t)aclnode->z_acldata + aclnode->z_size)) {
return (NULL);
}
*iflags = aclp->z_ops->ace_flags_get(acep);
*type = aclp->z_ops->ace_type_get(acep);
*access_mask = aclp->z_ops->ace_mask_get(acep);
*who = aclp->z_ops->ace_who_get(acep);
aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size;
aclnode->z_ace_idx++;
return ((void *)acep);
}
return (NULL);
}
/*ARGSUSED*/
static uint64_t
zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt,
uint16_t *flags, uint16_t *type, uint32_t *mask)
{
zfs_acl_t *aclp = datap;
zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)(uintptr_t)cookie;
uint64_t who;
acep = zfs_acl_next_ace(aclp, acep, &who, mask,
flags, type);
return ((uint64_t)(uintptr_t)acep);
}
/*
* Copy ACE to internal ZFS format.
* While processing the ACL each ACE will be validated for correctness.
* ACE FUIDs will be created later.
*/
static int
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
{
int i;
uint16_t entry_type;
zfs_ace_t *aceptr = z_acl;
ace_t *acep = datap;
zfs_object_ace_t *zobjacep;
ace_object_t *aceobjp;
for (i = 0; i != aclcnt; i++) {
aceptr->z_hdr.z_access_mask = acep->a_access_mask;
aceptr->z_hdr.z_flags = acep->a_flags;
aceptr->z_hdr.z_type = acep->a_type;
entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE) {
aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
cr, (entry_type == 0) ?
ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
}
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_type, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
switch (acep->a_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
bcopy(aceobjp->a_obj_type, zobjacep->z_object_type,
sizeof (aceobjp->a_obj_type));
bcopy(aceobjp->a_inherit_obj_type,
zobjacep->z_inherit_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
default:
acep = (ace_t *)((caddr_t)acep + sizeof (ace_t));
}
aceptr = (zfs_ace_t *)((caddr_t)aceptr +
aclp->z_ops->ace_size(aceptr));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* Copy ZFS ACEs to fixed size ace_t layout
*/
static void
zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
void *datap, int filter)
{
uint64_t who;
uint32_t access_mask;
uint16_t iflags, type;
zfs_ace_hdr_t *zacep = NULL;
ace_t *acep = datap;
ace_object_t *objacep;
zfs_object_ace_t *zobjacep;
size_t ace_size;
uint16_t entry_type;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (filter) {
continue;
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
bcopy(zobjacep->z_object_type,
objacep->a_obj_type,
sizeof (zobjacep->z_object_type));
bcopy(zobjacep->z_inherit_type,
objacep->a_inherit_obj_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
default:
ace_size = sizeof (ace_t);
break;
}
entry_type = (iflags & ACE_TYPE_FLAGS);
if ((entry_type != ACE_OWNER &&
entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE)) {
acep->a_who = zfs_fuid_map_id(zfsvfs, who,
cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
ZFS_ACE_GROUP : ZFS_ACE_USER);
} else {
acep->a_who = (uid_t)(int64_t)who;
}
acep->a_access_mask = access_mask;
acep->a_flags = iflags;
acep->a_type = type;
acep = (ace_t *)((caddr_t)acep + ace_size);
}
}
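/*
 * Illustrative sketch (not part of this change): both copy routines above
 * walk packed arrays of variable-size entries, advancing by each entry's
 * own size (object ACEs are larger than plain ACEs) rather than by a fixed
 * stride.  The standalone walker below shows that pattern for a generic
 * header that carries its own size; struct rec and walk_records() are
 * hypothetical names used only in this sketch.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rec {
	uint16_t type;
	uint16_t size;	/* total size of this record, including header */
};

static void
walk_records(const void *buf, size_t buflen)
{
	const char *p = buf;
	const char *end = p + buflen;

	while (p + sizeof (struct rec) <= end) {
		const struct rec *r = (const struct rec *)p;

		if (r->size < sizeof (struct rec) || p + r->size > end)
			break;	/* malformed entry; stop walking */
		printf("type=%u size=%u\n", r->type, r->size);
		p += r->size;	/* advance by this record's own size */
	}
}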
static int
zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep,
zfs_oldace_t *z_acl, int aclcnt, size_t *size)
{
int i;
zfs_oldace_t *aceptr = z_acl;
for (i = 0; i != aclcnt; i++, aceptr++) {
aceptr->z_access_mask = acep[i].a_access_mask;
aceptr->z_type = acep[i].a_type;
aceptr->z_flags = acep[i].a_flags;
aceptr->z_fuid = acep[i].a_who;
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_type, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
 * Convert the old ACL format to the new one.
*/
void
zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
{
zfs_oldace_t *oldaclp;
int i;
uint16_t type, iflags;
uint32_t access_mask;
uint64_t who;
void *cookie = NULL;
zfs_acl_node_t *newaclnode;
ASSERT3U(aclp->z_version, ==, ZFS_ACL_VERSION_INITIAL);
/*
* First create the ACE in a contiguous piece of memory
* for zfs_copy_ace_2_fuid().
*
* We only convert an ACL once, so this won't happen
 * every time.
*/
oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
KM_SLEEP);
i = 0;
while ((cookie = zfs_acl_next_ace(aclp, cookie, &who,
&access_mask, &iflags, &type))) {
oldaclp[i].z_flags = iflags;
oldaclp[i].z_type = type;
oldaclp[i].z_fuid = who;
oldaclp[i++].z_access_mask = access_mask;
}
newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
sizeof (zfs_object_ace_t));
aclp->z_ops = &zfs_acl_fuid_ops;
VERIFY0(zfs_copy_ace_2_fuid(zp->z_zfsvfs, ZTOV(zp)->v_type, aclp,
oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
&newaclnode->z_size, NULL, cr));
newaclnode->z_ace_count = aclp->z_acl_count;
aclp->z_version = ZFS_ACL_VERSION;
kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t));
/*
* Release all previous ACL nodes
*/
zfs_acl_release_nodes(aclp);
list_insert_head(&aclp->z_acl, newaclnode);
aclp->z_acl_bytes = newaclnode->z_size;
aclp->z_acl_count = newaclnode->z_ace_count;
}
/*
* Convert unix access mask to v4 access mask
*/
static uint32_t
zfs_unix_to_v4(uint32_t access_mask)
{
uint32_t new_mask = 0;
if (access_mask & S_IXOTH)
new_mask |= ACE_EXECUTE;
if (access_mask & S_IWOTH)
new_mask |= ACE_WRITE_DATA;
if (access_mask & S_IROTH)
new_mask |= ACE_READ_DATA;
return (new_mask);
}
static void
zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask,
uint16_t access_type, uint64_t fuid, uint16_t entry_type)
{
uint16_t type = entry_type & ACE_TYPE_FLAGS;
aclp->z_ops->ace_mask_set(acep, access_mask);
aclp->z_ops->ace_type_set(acep, access_type);
aclp->z_ops->ace_flags_set(acep, entry_type);
if ((type != ACE_OWNER && type != OWNING_GROUP &&
type != ACE_EVERYONE))
aclp->z_ops->ace_who_set(acep, fuid);
}
/*
* Determine mode of file based on ACL.
*/
uint64_t
zfs_mode_compute(uint64_t fmode, zfs_acl_t *aclp,
uint64_t *pflags, uint64_t fuid, uint64_t fgid)
{
int entry_type;
mode_t mode;
mode_t seen = 0;
zfs_ace_hdr_t *acep = NULL;
uint64_t who;
uint16_t iflags, type;
uint32_t access_mask;
boolean_t an_exec_denied = B_FALSE;
mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX));
while ((acep = zfs_acl_next_ace(aclp, acep, &who,
&access_mask, &iflags, &type))) {
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* Skip over any inherit_only ACEs
*/
if (iflags & ACE_INHERIT_ONLY_ACE)
continue;
if (entry_type == ACE_OWNER || (entry_type == 0 &&
who == fuid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRUSR))) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWUSR))) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXUSR))) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
} else if (entry_type == OWNING_GROUP ||
(entry_type == ACE_IDENTIFIER_GROUP && who == fgid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRGRP))) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWGRP))) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXGRP))) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
} else if (entry_type == ACE_EVERYONE) {
if ((access_mask & ACE_READ_DATA)) {
if (!(seen & S_IRUSR)) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if (!(seen & S_IRGRP)) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if (!(seen & S_IROTH)) {
seen |= S_IROTH;
if (type == ALLOW) {
mode |= S_IROTH;
}
}
}
if ((access_mask & ACE_WRITE_DATA)) {
if (!(seen & S_IWUSR)) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if (!(seen & S_IWGRP)) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if (!(seen & S_IWOTH)) {
seen |= S_IWOTH;
if (type == ALLOW) {
mode |= S_IWOTH;
}
}
}
if ((access_mask & ACE_EXECUTE)) {
if (!(seen & S_IXUSR)) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
if (!(seen & S_IXGRP)) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
if (!(seen & S_IXOTH)) {
seen |= S_IXOTH;
if (type == ALLOW) {
mode |= S_IXOTH;
}
}
}
} else {
/*
 * We only care whether this IDENTIFIER_GROUP or
 * USER ACE denies execute access to someone; the
 * mode is not affected.
*/
if ((access_mask & ACE_EXECUTE) && type == DENY)
an_exec_denied = B_TRUE;
}
}
/*
* Failure to allow is effectively a deny, so execute permission
* is denied if it was never mentioned or if we explicitly
* weren't allowed it.
*/
if (!an_exec_denied &&
((seen & ALL_MODE_EXECS) != ALL_MODE_EXECS ||
(mode & ALL_MODE_EXECS) != ALL_MODE_EXECS))
an_exec_denied = B_TRUE;
if (an_exec_denied)
*pflags &= ~ZFS_NO_EXECS_DENIED;
else
*pflags |= ZFS_NO_EXECS_DENIED;
return (mode);
}
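/*
 * Illustrative sketch (not part of this change): zfs_mode_compute() gives
 * each mode bit to the first ACE that mentions it -- an ALLOW sets the bit,
 * a DENY merely marks it "seen" so later ACEs cannot flip it.  The
 * standalone loop below shows that first-match-wins rule for a single mode
 * bit; struct simple_ace and first_match_bit() are hypothetical names for
 * this sketch only.
 */
#include <stddef.h>

struct simple_ace {
	int	allow;		/* 1 = ALLOW, 0 = DENY */
	int	mentions_bit;	/* does this ACE cover the bit at all? */
};

static int
first_match_bit(const struct simple_ace *aces, size_t naces)
{
	int seen = 0, granted = 0;

	for (size_t i = 0; i < naces; i++) {
		if (!aces[i].mentions_bit || seen)
			continue;
		seen = 1;		/* later ACEs no longer matter */
		granted = aces[i].allow;
	}
	return (granted);
}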
/*
* Read an external acl object. If the intent is to modify, always
* create a new acl and leave any cached acl in place.
*/
int
zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
boolean_t will_modify)
{
zfs_acl_t *aclp;
int aclsize;
int acl_count;
zfs_acl_node_t *aclnode;
zfs_acl_phys_t znode_acl;
int version;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(zp), __func__);
if (zp->z_acl_cached && !will_modify) {
*aclpp = zp->z_acl_cached;
return (0);
}
version = zfs_znode_acl_version(zp);
if ((error = zfs_acl_znode_info(zp, &aclsize,
&acl_count, &znode_acl)) != 0) {
goto done;
}
aclp = zfs_acl_alloc(version);
aclp->z_acl_count = acl_count;
aclp->z_acl_bytes = aclsize;
aclnode = zfs_acl_node_alloc(aclsize);
aclnode->z_ace_count = aclp->z_acl_count;
aclnode->z_size = aclsize;
if (!zp->z_is_sa) {
if (znode_acl.z_acl_extern_obj) {
error = dmu_read(zp->z_zfsvfs->z_os,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
bcopy(znode_acl.z_ace_data, aclnode->z_acldata,
aclnode->z_size);
}
} else {
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zp->z_zfsvfs),
aclnode->z_acldata, aclnode->z_size);
}
if (error != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
goto done;
}
list_insert_head(&aclp->z_acl, aclnode);
*aclpp = aclp;
if (!will_modify)
zp->z_acl_cached = aclp;
done:
return (error);
}
/*ARGSUSED*/
void
zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
boolean_t start, void *userdata)
{
zfs_acl_locator_cb_t *cb = (zfs_acl_locator_cb_t *)userdata;
if (start) {
cb->cb_acl_node = list_head(&cb->cb_aclp->z_acl);
} else {
cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
cb->cb_acl_node);
}
*dataptr = cb->cb_acl_node->z_acldata;
*length = cb->cb_acl_node->z_size;
}
int
zfs_acl_chown_setattr(znode_t *zp)
{
int error;
zfs_acl_t *aclp;
if (zp->z_zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
ASSERT_VOP_IN_SEQC(ZTOV(zp));
}
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if ((error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE)) == 0)
zp->z_mode = zfs_mode_compute(zp->z_mode, aclp,
&zp->z_pflags, zp->z_uid, zp->z_gid);
return (error);
}
/*
* common code for setting ACLs.
*
* This function is called from zfs_mode_update, zfs_perm_init, and zfs_setacl.
* zfs_setacl passes a non-NULL inherit pointer (ihp) to indicate that it's
* already checked the acl and knows whether to inherit.
*/
int
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
int error;
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
dmu_object_type_t otype;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t mode;
sa_bulk_attr_t bulk[5];
uint64_t ctime[2];
int count = 0;
zfs_acl_phys_t acl_phys;
if (zp->z_zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_IN_SEQC(ZTOV(zp));
}
mode = zp->z_mode;
mode = zfs_mode_compute(mode, aclp, &zp->z_pflags,
zp->z_uid, zp->z_gid);
zp->z_mode = mode;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
/*
* Upgrade needed?
*/
if (!zfsvfs->z_use_fuids) {
otype = DMU_OT_OLDACL;
} else {
if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
(zfsvfs->z_version >= ZPL_VERSION_FUID))
zfs_acl_xform(zp, aclp, cr);
ASSERT3U(aclp->z_version, >=, ZFS_ACL_VERSION_FUID);
otype = DMU_OT_ACL;
}
/*
* Arrgh, we have to handle old on disk format
* as well as newer (preferred) SA format.
*/
if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */
locate.cb_aclp = aclp;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, aclp->z_acl_bytes);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs),
NULL, &aclp->z_acl_count, sizeof (uint64_t));
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
uint64_t off = 0;
uint64_t aoid;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
&acl_phys, sizeof (acl_phys))) != 0)
return (error);
aoid = acl_phys.z_acl_extern_obj;
if (aclp->z_acl_bytes > ZFS_ACE_SPACE) {
/*
* If ACL was previously external and we are now
* converting to new ACL format then release old
* ACL object and create a new one.
*/
if (aoid &&
aclp->z_version != acl_phys.z_acl_version) {
error = dmu_object_free(zfsvfs->z_os, aoid, tx);
if (error)
return (error);
aoid = 0;
}
if (aoid == 0) {
aoid = dmu_object_alloc(zfsvfs->z_os,
otype, aclp->z_acl_bytes,
otype == DMU_OT_ACL ?
DMU_OT_SYSACL : DMU_OT_NONE,
otype == DMU_OT_ACL ?
DN_OLD_MAX_BONUSLEN : 0, tx);
} else {
(void) dmu_object_set_blocksize(zfsvfs->z_os,
aoid, aclp->z_acl_bytes, 0, tx);
}
acl_phys.z_acl_extern_obj = aoid;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
dmu_write(zfsvfs->z_os, aoid, off,
aclnode->z_size, aclnode->z_acldata, tx);
off += aclnode->z_size;
}
} else {
void *start = acl_phys.z_ace_data;
/*
* Migrating back embedded?
*/
if (acl_phys.z_acl_extern_obj) {
error = dmu_object_free(zfsvfs->z_os,
acl_phys.z_acl_extern_obj, tx);
if (error)
return (error);
acl_phys.z_acl_extern_obj = 0;
}
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
bcopy(aclnode->z_acldata, start,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
}
/*
* If Old version then swap count/bytes to match old
* layout of znode_acl_phys_t.
*/
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
acl_phys.z_acl_size = aclp->z_acl_count;
acl_phys.z_acl_count = aclp->z_acl_bytes;
} else {
acl_phys.z_acl_size = aclp->z_acl_bytes;
acl_phys.z_acl_count = aclp->z_acl_count;
}
acl_phys.z_acl_version = aclp->z_version;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (acl_phys));
}
/*
* Replace ACL wide bits, but first clear them.
*/
zp->z_pflags &= ~ZFS_ACL_WIDE_FLAGS;
zp->z_pflags |= aclp->z_hints;
if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0)
zp->z_pflags |= ZFS_ACL_TRIVIAL;
zfs_tstamp_update_setup(zp, STATE_CHANGED, NULL, ctime);
return (sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
}
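/*
 * A minimal, standalone sketch of the storage-format decision made in
 * zfs_aclset_common() above: SA-based znodes keep the whole ACL in a
 * single system attribute, while legacy znodes embed it in
 * znode_acl_phys_t and spill to an external DMU object once it no longer
 * fits.  The constant and enum below are simplified stand-ins, not the
 * real ZFS definitions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SKETCH_ACE_SPACE 2048 /* stand-in for ZFS_ACE_SPACE */

enum acl_storage { ACL_SA_ATTR, ACL_EMBEDDED, ACL_EXTERNAL_OBJ };

static enum acl_storage
choose_acl_storage(bool is_sa, size_t acl_bytes)
{
    if (is_sa)
        return (ACL_SA_ATTR);       /* single DACL_ACES attribute */
    if (acl_bytes > SKETCH_ACE_SPACE)
        return (ACL_EXTERNAL_OBJ);  /* spill to a dedicated object */
    return (ACL_EMBEDDED);          /* fits inside znode_acl_phys_t */
}

int
main(void)
{
    /* Prints "0 1 2": SA attribute, embedded, external object. */
    printf("%d %d %d\n",
        choose_acl_storage(true, 100),
        choose_acl_storage(false, 100),
        choose_acl_storage(false, 4096));
    return (0);
}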
static void
zfs_acl_chmod(vtype_t vtype, uint64_t mode, boolean_t split, boolean_t trim,
zfs_acl_t *aclp)
{
void *acep = NULL;
uint64_t who;
int new_count, new_bytes;
int ace_size;
int entry_type;
uint16_t iflags, type;
uint32_t access_mask;
zfs_acl_node_t *newnode;
size_t abstract_size = aclp->z_ops->ace_abstract_size();
void *zacep;
boolean_t isdir;
trivial_acl_t masks;
new_count = new_bytes = 0;
isdir = (vtype == VDIR);
acl_trivial_access_masks((mode_t)mode, isdir, &masks);
newnode = zfs_acl_node_alloc((abstract_size * 6) + aclp->z_acl_bytes);
zacep = newnode->z_acldata;
if (masks.allow0) {
zfs_set_ace(aclp, zacep, masks.allow0, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny1) {
zfs_set_ace(aclp, zacep, masks.deny1, DENY, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny2) {
zfs_set_ace(aclp, zacep, masks.deny2, DENY, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* ACEs used to represent the file mode may be divided
* into an equivalent pair of inherit-only and regular
* ACEs, if they are inheritable.
* Skip regular ACEs, which are replaced by the new mode.
*/
if (split && (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)) {
if (!isdir || !(iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
continue;
/*
* We preserve owner@, group@, or everyone@
* permissions, if they are inheritable, by
* copying them to inherit_only ACEs. This
* prevents inheritable permissions from being
* altered along with the file mode.
*/
iflags |= ACE_INHERIT_ONLY_ACE;
}
/*
* If this ACL has any inheritable ACEs, mark that in
* the hints (which are later masked into the pflags)
* so create knows to do inheritance.
*/
if (isdir && (iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if ((type != ALLOW && type != DENY) ||
(iflags & ACE_INHERIT_ONLY_ACE)) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
break;
}
} else {
/*
* Limit permissions granted by ACEs to be no greater
* than permissions of the requested group mode.
* Applies when the "aclmode" property is set to
* "groupmask".
*/
if ((type == ALLOW) && trim)
access_mask &= masks.group;
}
zfs_set_ace(aclp, zacep, access_mask, type, who, iflags);
ace_size = aclp->z_ops->ace_size(acep);
zacep = (void *)((uintptr_t)zacep + ace_size);
new_count++;
new_bytes += ace_size;
}
zfs_set_ace(aclp, zacep, masks.owner, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.group, ALLOW, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.everyone, ALLOW, -1, ACE_EVERYONE);
new_count += 3;
new_bytes += abstract_size * 3;
zfs_acl_release_nodes(aclp);
aclp->z_acl_count = new_count;
aclp->z_acl_bytes = new_bytes;
newnode->z_ace_count = new_count;
newnode->z_size = new_bytes;
list_insert_tail(&aclp->z_acl, newnode);
}
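/*
 * A minimal sketch of the mode-driven part of the rebuild above: the new
 * ACL ends with owner@, group@, and everyone@ ALLOW entries whose access
 * masks are derived from the three permission classes of the requested
 * mode.  The real masks come from acl_trivial_access_masks() and are
 * preceded by up to three allow/deny entries that fix up ordering; the
 * rwx decoding below is illustrative only.
 */
#include <stdio.h>
#include <sys/stat.h>

static void
print_class(const char *who, mode_t mode, int shift)
{
    printf("%-10s allow %c%c%c\n", who,
        (mode >> shift) & 4 ? 'r' : '-',
        (mode >> shift) & 2 ? 'w' : '-',
        (mode >> shift) & 1 ? 'x' : '-');
}

int
main(void)
{
    mode_t mode = 0754;

    print_class("owner@", mode, 6);     /* rwx */
    print_class("group@", mode, 3);     /* r-x */
    print_class("everyone@", mode, 0);  /* r-- */
    return (0);
}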
int
zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
{
int error = 0;
mutex_enter(&zp->z_acl_lock);
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_DISCARD)
*aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
else
error = zfs_acl_node_read(zp, B_TRUE, aclp, B_TRUE);
if (error == 0) {
(*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
zfs_acl_chmod(ZTOV(zp)->v_type, mode, B_TRUE,
(zp->z_zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK), *aclp);
}
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Should ACE be inherited?
*/
static int
zfs_ace_can_use(vtype_t vtype, uint16_t acep_flags)
{
int iflags = (acep_flags & 0xf);
if ((vtype == VDIR) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
return (1);
else if (iflags & ACE_FILE_INHERIT_ACE)
return (!((vtype == VDIR) &&
(iflags & ACE_NO_PROPAGATE_INHERIT_ACE)));
return (0);
}
/*
* inherit inheritable ACEs from parent
*/
static zfs_acl_t *
zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
uint64_t mode, boolean_t *need_chmod)
{
void *pacep = NULL;
void *acep;
zfs_acl_node_t *aclnode;
zfs_acl_t *aclp = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t iflags, newflags, type;
size_t ace_size;
void *data1, *data2;
size_t data1sz, data2sz;
uint_t aclinherit;
boolean_t isdir = (vtype == VDIR);
boolean_t isreg = (vtype == VREG);
*need_chmod = B_TRUE;
aclp = zfs_acl_alloc(paclp->z_version);
aclinherit = zfsvfs->z_acl_inherit;
if (aclinherit == ZFS_ACL_DISCARD || vtype == VLNK)
return (aclp);
while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
&access_mask, &iflags, &type))) {
/*
* don't inherit bogus ACEs
*/
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
/*
* Check if ACE is inheritable by this vnode
*/
if ((aclinherit == ZFS_ACL_NOALLOW && type == ALLOW) ||
!zfs_ace_can_use(vtype, iflags))
continue;
/*
* If owner@, group@, or everyone@ is inheritable,
* then zfs_acl_chmod() isn't needed.
*/
if ((aclinherit == ZFS_ACL_PASSTHROUGH ||
aclinherit == ZFS_ACL_PASSTHROUGH_X) &&
((iflags & (ACE_OWNER|ACE_EVERYONE)) ||
((iflags & OWNING_GROUP) == OWNING_GROUP)) &&
(isreg || (isdir && (iflags & ACE_DIRECTORY_INHERIT_ACE))))
*need_chmod = B_FALSE;
/*
* Strip inherited execute permission from file if
* not in mode
*/
if (aclinherit == ZFS_ACL_PASSTHROUGH_X && type == ALLOW &&
!isdir && ((mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)) {
access_mask &= ~ACE_EXECUTE;
}
/*
* Strip write_acl and write_owner from permissions
* when inheriting an ACE
*/
if (aclinherit == ZFS_ACL_RESTRICTED && type == ALLOW) {
access_mask &= ~RESTRICTED_CLEAR;
}
ace_size = aclp->z_ops->ace_size(pacep);
aclnode = zfs_acl_node_alloc(ace_size);
list_insert_tail(&aclp->z_acl, aclnode);
acep = aclnode->z_acldata;
zfs_set_ace(aclp, acep, access_mask, type,
who, iflags|ACE_INHERITED_ACE);
/*
* Copy special opaque data if any
*/
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
data2sz = aclp->z_ops->ace_data(acep, &data2);
VERIFY3U(data2sz, ==, data1sz);
bcopy(data1, data2, data2sz);
}
aclp->z_acl_count++;
aclnode->z_ace_count++;
aclp->z_acl_bytes += aclnode->z_size;
newflags = aclp->z_ops->ace_flags_get(acep);
/*
* If ACE is not to be inherited further, or if the vnode is
* not a directory, remove all inheritance flags
*/
if (!isdir || (iflags & ACE_NO_PROPAGATE_INHERIT_ACE)) {
newflags &= ~ALL_INHERIT;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
continue;
}
/*
* This directory has an inheritable ACE
*/
aclp->z_hints |= ZFS_INHERIT_ACE;
/*
* If only FILE_INHERIT is set then turn on
* inherit_only
*/
if ((iflags & (ACE_FILE_INHERIT_ACE |
ACE_DIRECTORY_INHERIT_ACE)) == ACE_FILE_INHERIT_ACE) {
newflags |= ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
} else {
newflags &= ~ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
}
}
if (zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
aclp->z_acl_count != 0) {
*need_chmod = B_FALSE;
}
return (aclp);
}
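/*
 * A minimal sketch of the flag rewriting applied to each inherited ACE in
 * zfs_acl_inherit() above, using stand-in bit values (the real ACE_*
 * flags come from the ZFS headers): a non-directory, or a no-propagate
 * ACE, loses every inheritance flag; a directory inheriting a file-only
 * ACE keeps it as inherit-only; and everything inherited is tagged as
 * such.
 */
#include <stdbool.h>
#include <stdint.h>

#define SK_FILE_INHERIT 0x01
#define SK_DIR_INHERIT  0x02
#define SK_NO_PROPAGATE 0x04
#define SK_INHERIT_ONLY 0x08
#define SK_INHERITED    0x80
#define SK_ALL_INHERIT \
    (SK_FILE_INHERIT | SK_DIR_INHERIT | SK_NO_PROPAGATE | SK_INHERIT_ONLY)

/*
 * e.g. inherit_flags(SK_FILE_INHERIT, false) == SK_INHERITED, while
 * inherit_flags(SK_FILE_INHERIT, true) ==
 *     (SK_FILE_INHERIT | SK_INHERIT_ONLY | SK_INHERITED).
 */
uint16_t
inherit_flags(uint16_t iflags, bool isdir)
{
    uint16_t newflags = iflags;

    if (!isdir || (iflags & SK_NO_PROPAGATE)) {
        newflags &= ~SK_ALL_INHERIT;
    } else if ((iflags & (SK_FILE_INHERIT | SK_DIR_INHERIT)) ==
        SK_FILE_INHERIT) {
        newflags |= SK_INHERIT_ONLY;    /* applies to children only */
    } else {
        newflags &= ~SK_INHERIT_ONLY;
    }
    return (newflags | SK_INHERITED);
}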
/*
* Create file system object initial permissions
* including inheritable ACEs.
* Also, create FUIDs for owner and group.
*/
int
zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids)
{
int error;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zfs_acl_t *paclp;
gid_t gid;
boolean_t need_chmod = B_TRUE;
boolean_t trim = B_FALSE;
boolean_t inherited = B_FALSE;
if ((flag & IS_ROOT_NODE) == 0) {
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__);
} else
ASSERT3P(dzp->z_vnode, ==, NULL);
bzero(acl_ids, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
if (vsecp)
if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, cr,
&acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
return (error);
/*
* Determine uid and gid.
*/
if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
((flag & IS_XATTR) && (vap->va_type == VDIR))) {
acl_ids->z_fuid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr,
ZFS_OWNER, &acl_ids->z_fuidp);
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid, cr,
ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
cr, &acl_ids->z_fuidp);
acl_ids->z_fgid = 0;
if (vap->va_mask & AT_GID) {
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
if (acl_ids->z_fgid != dzp->z_gid &&
!groupmember(vap->va_gid, cr) &&
secpolicy_vnode_create_gid(cr) != 0)
acl_ids->z_fgid = 0;
}
if (acl_ids->z_fgid == 0) {
char *domain;
uint32_t rid;
acl_ids->z_fgid = dzp->z_gid;
gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
cr, ZFS_GROUP);
if (zfsvfs->z_use_fuids &&
IS_EPHEMERAL(acl_ids->z_fgid)) {
domain =
zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx,
FUID_INDEX(acl_ids->z_fgid));
rid = FUID_RID(acl_ids->z_fgid);
zfs_fuid_node_add(&acl_ids->z_fuidp,
domain, rid, FUID_INDEX(acl_ids->z_fgid),
acl_ids->z_fgid, ZFS_GROUP);
}
}
}
/*
* If we're creating a directory, and the parent directory has the
* set-GID bit set, set it on the new directory.
* Otherwise, if the user is neither privileged nor a member of the
* file's new group, clear the file's set-GID bit.
*/
if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) &&
(vap->va_type == VDIR)) {
acl_ids->z_mode |= S_ISGID;
} else {
if ((acl_ids->z_mode & S_ISGID) &&
secpolicy_vnode_setids_setgids(ZTOV(dzp), cr, gid) != 0)
acl_ids->z_mode &= ~S_ISGID;
}
if (acl_ids->z_aclp == NULL) {
mutex_enter(&dzp->z_acl_lock);
if (!(flag & IS_ROOT_NODE) &&
(dzp->z_pflags & ZFS_INHERIT_ACE) &&
!(dzp->z_pflags & ZFS_XATTR)) {
VERIFY0(zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_type, paclp, acl_ids->z_mode, &need_chmod);
inherited = B_TRUE;
} else {
acl_ids->z_aclp =
zfs_acl_alloc(zfs_acl_version_zp(dzp));
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
mutex_exit(&dzp->z_acl_lock);
if (need_chmod) {
if (vap->va_type == VDIR)
acl_ids->z_aclp->z_hints |=
ZFS_ACL_AUTO_INHERIT;
if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH_X)
trim = B_TRUE;
zfs_acl_chmod(vap->va_type, acl_ids->z_mode, B_FALSE,
trim, acl_ids->z_aclp);
}
}
if (inherited || vsecp) {
acl_ids->z_mode = zfs_mode_compute(acl_ids->z_mode,
acl_ids->z_aclp, &acl_ids->z_aclp->z_hints,
acl_ids->z_fuid, acl_ids->z_fgid);
if (ace_trivial_common(acl_ids->z_aclp, 0, zfs_ace_walk) == 0)
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
return (0);
}
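/*
 * A minimal sketch of the set-GID handling in zfs_acl_ids_create() above,
 * with the privilege and group-membership checks collapsed into a single
 * boolean supplied by the caller (a hypothetical stand-in for
 * groupmember() and secpolicy_vnode_setids_setgids()).
 */
#include <stdbool.h>
#include <sys/stat.h>

/*
 * e.g. a new directory under a set-GID parent keeps S_ISGID even when
 * the creator is neither privileged nor a member of the group.
 */
mode_t
apply_setgid_policy(mode_t req_mode, mode_t parent_mode, bool creating_dir,
    bool in_group_or_privileged)
{
    if ((parent_mode & S_ISGID) && creating_dir)
        return (req_mode | S_ISGID);    /* propagate from parent dir */
    if ((req_mode & S_ISGID) && !in_group_or_privileged)
        return (req_mode & ~S_ISGID);   /* not allowed to request it */
    return (req_mode);
}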
/*
* Free ACL and fuid_infop, but not the acl_ids structure
*/
void
zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
{
if (acl_ids->z_aclp)
zfs_acl_free(acl_ids->z_aclp);
if (acl_ids->z_fuidp)
zfs_fuid_info_free(acl_ids->z_fuidp);
acl_ids->z_aclp = NULL;
acl_ids->z_fuidp = NULL;
}
boolean_t
zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid)
{
return (zfs_id_overquota(zv, DMU_USERUSED_OBJECT, acl_ids->z_fuid) ||
zfs_id_overquota(zv, DMU_GROUPUSED_OBJECT, acl_ids->z_fgid) ||
(projid != ZFS_DEFAULT_PROJID && projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zv, DMU_PROJECTUSED_OBJECT, projid)));
}
/*
* Retrieve a file's ACL
*/
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfs_acl_t *aclp;
ulong_t mask;
int error;
int count = 0;
int largeace = 0;
mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr)))
return (error);
mutex_enter(&zp->z_acl_lock);
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(zp), __func__);
error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Scan ACL to determine number of ACEs
*/
if ((zp->z_pflags & ZFS_ACL_OBJ_ACE) && !(mask & VSA_ACE_ALLTYPES)) {
void *zacep = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t type, iflags;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
largeace++;
continue;
default:
count++;
}
}
vsecp->vsa_aclcnt = count;
} else
count = (int)aclp->z_acl_count;
if (mask & VSA_ACECNT) {
vsecp->vsa_aclcnt = count;
}
if (mask & VSA_ACE) {
size_t aclsz;
aclsz = count * sizeof (ace_t) +
sizeof (ace_object_t) * largeace;
vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP);
vsecp->vsa_aclentsz = aclsz;
if (aclp->z_version == ZFS_ACL_VERSION_FUID)
zfs_copy_fuid_2_ace(zp->z_zfsvfs, aclp, cr,
vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES));
else {
zfs_acl_node_t *aclnode;
void *start = vsecp->vsa_aclentp;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
bcopy(aclnode->z_acldata, start,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
ASSERT3U((caddr_t)start - (caddr_t)vsecp->vsa_aclentp,
==, aclp->z_acl_bytes);
}
}
if (mask & VSA_ACE_ACLFLAGS) {
vsecp->vsa_aclflags = 0;
if (zp->z_pflags & ZFS_ACL_DEFAULTED)
vsecp->vsa_aclflags |= ACL_DEFAULTED;
if (zp->z_pflags & ZFS_ACL_PROTECTED)
vsecp->vsa_aclflags |= ACL_PROTECTED;
if (zp->z_pflags & ZFS_ACL_AUTO_INHERIT)
vsecp->vsa_aclflags |= ACL_AUTO_INHERIT;
}
mutex_exit(&zp->z_acl_lock);
return (0);
}
int
zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_type,
vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
zfs_acl_t *aclp;
zfs_acl_node_t *aclnode;
int aclcnt = vsecp->vsa_aclcnt;
int error;
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (SET_ERROR(EINVAL));
aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
aclp->z_hints = 0;
aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
if ((error = zfs_copy_ace_2_oldace(obj_type, aclp,
(ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata,
aclcnt, &aclnode->z_size)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
} else {
if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_type, aclp,
vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
&aclnode->z_size, fuidp, cr)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
}
aclp->z_acl_bytes = aclnode->z_size;
aclnode->z_ace_count = aclcnt;
aclp->z_acl_count = aclcnt;
list_insert_head(&aclp->z_acl, aclnode);
/*
* If flags are being set then add them to z_hints
*/
if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) {
if (vsecp->vsa_aclflags & ACL_PROTECTED)
aclp->z_hints |= ZFS_ACL_PROTECTED;
if (vsecp->vsa_aclflags & ACL_DEFAULTED)
aclp->z_hints |= ZFS_ACL_DEFAULTED;
if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT)
aclp->z_hints |= ZFS_ACL_AUTO_INHERIT;
}
*zaclp = aclp;
return (0);
}
/*
* Set a file's ACL
*/
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zilog_t *zilog = zfsvfs->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
zfs_acl_t *aclp;
zfs_fuid_info_t *fuidp = NULL;
boolean_t fuid_dirtied;
uint64_t acl_obj;
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
return (SET_ERROR(EPERM));
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)))
return (error);
error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, cr, &fuidp,
&aclp);
if (error)
return (error);
/*
* If ACL wide flags aren't being set then preserve any
* existing flags.
*/
if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) {
aclp->z_hints |=
(zp->z_pflags & V4_ACL_WIDE_FLAGS);
}
top:
mutex_enter(&zp->z_acl_lock);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
/*
* If old version and ACL won't fit in bonus and we aren't
* upgrading then take out necessary DMU holds
*/
if ((acl_obj = zfs_external_acl(zp)) != 0) {
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes);
}
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
mutex_exit(&zp->z_acl_lock);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
zfs_acl_free(aclp);
return (error);
}
error = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(error);
ASSERT3P(zp->z_acl_cached, ==, NULL);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
zfs_log_acl(zilog, tx, zp, vsecp, fuidp);
if (fuidp)
zfs_fuid_info_free(fuidp);
dmu_tx_commit(tx);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Check accesses of interest (AoI) against attributes of the dataset
* such as read-only. Returns zero if no AoI conflict with dataset
* attributes, otherwise an appropriate errno is returned.
*/
static int
zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
{
if ((v4_mode & WRITE_MASK) &&
(zp->z_zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
(!IS_DEVVP(ZTOV(zp)) ||
(IS_DEVVP(ZTOV(zp)) && (v4_mode & WRITE_MASK_ATTRS)))) {
return (SET_ERROR(EROFS));
}
/*
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common().
*/
if ((v4_mode & WRITE_MASK_DATA) &&
(zp->z_pflags & ZFS_IMMUTABLE)) {
return (SET_ERROR(EPERM));
}
/*
* In FreeBSD we allow modifying a directory's contents even if ZFS_NOUNLINK
* (sunlnk) is set. We just don't allow directory removal, which is
* handled in zfs_zaccess_delete().
*/
if ((v4_mode & ACE_DELETE) &&
(zp->z_pflags & ZFS_NOUNLINK)) {
return (EPERM);
}
if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED))) {
return (SET_ERROR(EACCES));
}
return (0);
}
/*
* The primary usage of this function is to loop through all of the
* ACEs in the znode, determining what accesses of interest (AoI) to
* the caller are allowed or denied. The AoI are expressed as bits in
* the working_mode parameter. As each ACE is processed, bits covered
* by that ACE are removed from the working_mode. This removal
* facilitates two things. The first is that when the working mode is
* empty (= 0), we know we've looked at all the AoI. The second is
* that the ACE interpretation rules don't allow a later ACE to undo
* something granted or denied by an earlier ACE. Removing the
* discovered access or denial enforces this rule. At the end of
* processing the ACEs, all AoI that were found to be denied are
* placed into the working_mode, giving the caller a mask of denied
* accesses. Returns:
* 0 if all AoI granted
* EACCES if the denied mask is non-zero
* other error if abnormal failure (e.g., IO error)
*
* A secondary usage of the function is to determine if any of the
* AoI are granted. If an ACE grants any access in
* the working_mode, we immediately short circuit out of the function.
* This mode is chosen by setting anyaccess to B_TRUE. The
* working_mode is not a denied access mask upon exit if the function
* is used in this manner.
*/
static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_acl_t *aclp;
int error;
uid_t uid = crgetuid(cr);
uint64_t who;
uint16_t type, iflags;
uint16_t entry_type;
uint32_t access_mask;
uint32_t deny_mask = 0;
zfs_ace_hdr_t *acep = NULL;
boolean_t checkit;
uid_t gowner;
uid_t fowner;
zfs_fuid_map_ids(zp, cr, &fowner, &gowner);
mutex_enter(&zp->z_acl_lock);
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(zp), __func__);
error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
ASSERT3P(zp->z_acl_cached, !=, NULL);
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
uint32_t mask_matched;
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
if (ZTOV(zp)->v_type == VDIR && (iflags & ACE_INHERIT_ONLY_ACE))
continue;
/* Skip ACE if it does not affect any AoI */
mask_matched = (access_mask & *working_mode);
if (!mask_matched)
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
checkit = B_FALSE;
switch (entry_type) {
case ACE_OWNER:
if (uid == fowner)
checkit = B_TRUE;
break;
case OWNING_GROUP:
who = gowner;
- /*FALLTHROUGH*/
+ /* FALLTHROUGH */
case ACE_IDENTIFIER_GROUP:
checkit = zfs_groupmember(zfsvfs, who, cr);
break;
case ACE_EVERYONE:
checkit = B_TRUE;
break;
/* USER Entry */
default:
if (entry_type == 0) {
uid_t newid;
newid = zfs_fuid_map_id(zfsvfs, who, cr,
ZFS_ACE_USER);
if (newid != UID_NOBODY &&
uid == newid)
checkit = B_TRUE;
break;
} else {
mutex_exit(&zp->z_acl_lock);
return (SET_ERROR(EIO));
}
}
if (checkit) {
if (type == DENY) {
DTRACE_PROBE3(zfs__ace__denies,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
deny_mask |= mask_matched;
} else {
DTRACE_PROBE3(zfs__ace__allows,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
if (anyaccess) {
mutex_exit(&zp->z_acl_lock);
return (0);
}
}
*working_mode &= ~mask_matched;
}
/* Are we done? */
if (*working_mode == 0)
break;
}
mutex_exit(&zp->z_acl_lock);
/* Put the found 'denies' back on the working mode */
if (deny_mask) {
*working_mode |= deny_mask;
return (SET_ERROR(EACCES));
} else if (*working_mode) {
return (-1);
}
return (0);
}
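/*
 * A minimal, standalone model of the working_mode algorithm described in
 * the comment above zfs_zaccess_aces_check(): walk the ACEs in order, let
 * each applicable ACE decide the still-undecided bits it covers,
 * accumulate denied bits, and report EACCES if anything was denied.  The
 * types below are simplified stand-ins for the real ZFS ACE layout.
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    uint32_t mask;      /* access bits this entry covers */
    bool     deny;      /* DENY (true) or ALLOW (false) */
    bool     applies;   /* does the entry match the caller? */
} sk_ace_t;

static int
check_aces(const sk_ace_t *aces, size_t n, uint32_t *working_mode)
{
    uint32_t deny_mask = 0;

    for (size_t i = 0; i < n && *working_mode != 0; i++) {
        uint32_t matched = aces[i].mask & *working_mode;

        if (matched == 0 || !aces[i].applies)
            continue;
        if (aces[i].deny)
            deny_mask |= matched;
        /* Earlier ACEs win: these bits are now decided. */
        *working_mode &= ~matched;
    }
    if (deny_mask != 0) {
        *working_mode |= deny_mask; /* hand back what was denied */
        return (EACCES);
    }
    return (*working_mode != 0 ? -1 : 0);   /* -1: not decided by the ACL */
}

int
main(void)
{
    sk_ace_t aces[] = {
        { .mask = 0x1, .deny = true,  .applies = true },    /* deny read */
        { .mask = 0x3, .deny = false, .applies = true },    /* allow r/w */
    };
    uint32_t wm = 0x3;  /* want read and write */
    int ret = check_aces(aces, 2, &wm);

    /* ret == EACCES and wm == 0x1: read was denied by the earlier ACE. */
    return (ret == EACCES && wm == 0x1 ? 0 : 1);
}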
/*
* Return true if any access whatsoever is granted; we don't actually
* care what access is granted.
*/
boolean_t
zfs_has_access(znode_t *zp, cred_t *cr)
{
uint32_t have = ACE_ALL_PERMS;
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) {
uid_t owner;
owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
return (secpolicy_vnode_any_access(cr, ZTOV(zp), owner) == 0);
}
return (B_TRUE);
}
static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int err;
*working_mode = v4_mode;
*check_privs = B_TRUE;
/*
* Short circuit empty requests
*/
if (v4_mode == 0 || zfsvfs->z_replay) {
*working_mode = 0;
return (0);
}
if ((err = zfs_zaccess_dataset_check(zp, v4_mode)) != 0) {
*check_privs = B_FALSE;
return (err);
}
/*
* The caller requested that the ACL check be skipped. This
* would only happen if the caller checked VOP_ACCESS() with a
* 32 bit ACE mask and already had the appropriate permissions.
*/
if (skipaclchk) {
*working_mode = 0;
return (0);
}
/*
* Note: ZFS_READONLY represents the "DOS R/O" attribute.
* When that flag is set, we should behave as if write access
* were not granted by anything in the ACL. In particular:
* We _must_ allow writes after opening the file r/w, then
* setting the DOS R/O attribute, and writing some more.
* (Similar to how you can write after fchmod(fd, 0444).)
*
* Therefore ZFS_READONLY is ignored in the dataset check
* above, and checked here as if part of the ACL check.
* Also note: DOS R/O is ignored for directories.
*/
if ((v4_mode & WRITE_MASK_DATA) &&
(ZTOV(zp)->v_type != VDIR) &&
(zp->z_pflags & ZFS_READONLY)) {
return (SET_ERROR(EPERM));
}
return (zfs_zaccess_aces_check(zp, working_mode, B_FALSE, cr));
}
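/*
 * The fchmod() analogy from the ZFS_READONLY comment above, as a small
 * userland program: permissions are enforced at open(2) time, so a
 * descriptor opened read/write keeps working after the file becomes
 * read-only.  This is the behaviour the DOS R/O handling mirrors for
 * already-open files.  The path below is just an example.
 */
#include <err.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int
main(void)
{
    int fd = open("/tmp/rofile.demo", O_RDWR | O_CREAT | O_TRUNC, 0644);

    if (fd < 0)
        err(1, "open");
    if (fchmod(fd, 0444) != 0)      /* file is now read-only on disk */
        err(1, "fchmod");
    if (write(fd, "still writable\n", 15) != 15)
        err(1, "write");            /* succeeds: checked at open time */
    close(fd);
    return (0);
}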
static int
zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
cred_t *cr)
{
if (*working_mode != ACE_WRITE_DATA)
return (SET_ERROR(EACCES));
return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
check_privs, B_FALSE, cr));
}
/*
* Check if VEXEC is allowed.
*
* This routine is based on zfs_fastaccesschk_execute, whose slow path
* calls zfs_zaccess. That would be incorrect on FreeBSD (see
* zfs_freebsd_access for the difference). Thus this variant lets the
* caller handle the slow path (if necessary).
*
* On top of that we perform a lockless check for ZFS_NO_EXECS_DENIED.
*
* Safe access to znode_t is provided by the vnode lock.
*/
int
zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
{
boolean_t is_attr;
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
return (1);
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
(ZTOV(zdp)->v_type == VDIR));
if (is_attr)
return (1);
if (zdp->z_pflags & ZFS_NO_EXECS_DENIED)
return (0);
return (1);
}
/*
* Determine whether access should be granted/denied.
*
* The least priv subsystem is always consulted as a basic privilege
* can define any form of access.
*/
int
zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
{
uint32_t working_mode;
int error;
int is_attr;
boolean_t check_privs;
znode_t *xzp = NULL;
znode_t *check_zp = zp;
mode_t needed_bits;
uid_t owner;
is_attr = ((zp->z_pflags & ZFS_XATTR) && (ZTOV(zp)->v_type == VDIR));
/*
* In FreeBSD, we don't care about permissions of individual ADS.
* Note that not checking them is not just an optimization - without
* this shortcut, EA operations may bogusly fail with EACCES.
*/
if (zp->z_pflags & ZFS_XATTR)
return (0);
owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
/*
* Map the bits required to the standard vnode flags VREAD|VWRITE|VEXEC
* in needed_bits. Map the bits mapped by working_mode (currently
* missing) in missing_bits.
* Call secpolicy_vnode_access2() with (needed_bits & ~checkmode),
* needed_bits.
*/
needed_bits = 0;
working_mode = mode;
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
owner == crgetuid(cr))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= VREAD;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= VWRITE;
if (working_mode & ACE_EXECUTE)
needed_bits |= VEXEC;
if ((error = zfs_zaccess_common(check_zp, mode, &working_mode,
&check_privs, skipaclchk, cr)) == 0) {
if (is_attr)
VN_RELE(ZTOV(xzp));
return (secpolicy_vnode_access2(cr, ZTOV(zp), owner,
needed_bits, needed_bits));
}
if (error && !check_privs) {
if (is_attr)
VN_RELE(ZTOV(xzp));
return (error);
}
if (error && (flags & V_APPEND)) {
error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr);
}
if (error && check_privs) {
mode_t checkmode = 0;
vnode_t *check_vp = ZTOV(check_zp);
/*
* First check for implicit owner permission on
* read_acl/read_attributes
*/
error = 0;
ASSERT3U(working_mode, !=, 0);
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) &&
owner == crgetuid(cr)))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= VREAD;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= VWRITE;
if (working_mode & ACE_EXECUTE)
checkmode |= VEXEC;
error = secpolicy_vnode_access2(cr, check_vp, owner,
needed_bits & ~checkmode, needed_bits);
if (error == 0 && (working_mode & ACE_WRITE_OWNER))
error = secpolicy_vnode_chown(check_vp, cr, owner);
if (error == 0 && (working_mode & ACE_WRITE_ACL))
error = secpolicy_vnode_setdac(check_vp, cr, owner);
if (error == 0 && (working_mode &
(ACE_DELETE|ACE_DELETE_CHILD)))
error = secpolicy_vnode_remove(check_vp, cr);
if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) {
error = secpolicy_vnode_chown(check_vp, cr, owner);
}
if (error == 0) {
/*
* See if any bits other than those already checked
* for are still present. If so then return EACCES
*/
if (working_mode & ~(ZFS_CHECKED_MASKS)) {
error = SET_ERROR(EACCES);
}
}
} else if (error == 0) {
error = secpolicy_vnode_access2(cr, ZTOV(zp), owner,
needed_bits, needed_bits);
}
if (is_attr)
VN_RELE(ZTOV(xzp));
return (error);
}
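/*
 * A minimal sketch of the coarse mapping used twice in zfs_zaccess()
 * above: the fine-grained NFSv4 ACE bits still present in working_mode
 * are folded into the vnode-level VREAD/VWRITE/VEXEC bits handed to
 * secpolicy_vnode_access2().  Only a subset of the ACE bits is modelled,
 * and all bit values here are stand-ins for the real ACE_* and V*
 * definitions.
 */
#include <stdint.h>

#define SK_ACE_READ_DATA   0x0001
#define SK_ACE_WRITE_DATA  0x0002
#define SK_ACE_APPEND_DATA 0x0004
#define SK_ACE_EXECUTE     0x0008
#define SK_ACE_READ_ATTRS  0x0010
#define SK_ACE_WRITE_ATTRS 0x0020

#define SK_VEXEC  0x1
#define SK_VWRITE 0x2
#define SK_VREAD  0x4

/* e.g. fold_to_vaccess(SK_ACE_WRITE_DATA | SK_ACE_EXECUTE) == (SK_VWRITE | SK_VEXEC) */
int
fold_to_vaccess(uint32_t working_mode)
{
    int bits = 0;

    if (working_mode & (SK_ACE_READ_DATA | SK_ACE_READ_ATTRS))
        bits |= SK_VREAD;
    if (working_mode &
        (SK_ACE_WRITE_DATA | SK_ACE_APPEND_DATA | SK_ACE_WRITE_ATTRS))
        bits |= SK_VWRITE;
    if (working_mode & SK_ACE_EXECUTE)
        bits |= SK_VEXEC;
    return (bits);
}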
/*
* Translate traditional unix VREAD/VWRITE/VEXEC mode into
* NFSv4-style ZFS ACL format and call zfs_zaccess()
*/
int
zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr)
{
return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr));
}
/*
* Access function for secpolicy_vnode_setattr
*/
int
zfs_zaccess_unix(znode_t *zp, mode_t mode, cred_t *cr)
{
int v4_mode = zfs_unix_to_v4(mode >> 6);
return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr));
}
static int
zfs_delete_final_check(znode_t *zp, znode_t *dzp,
mode_t available_perms, cred_t *cr)
{
int error;
uid_t downer;
downer = zfs_fuid_map_id(dzp->z_zfsvfs, dzp->z_uid, cr, ZFS_OWNER);
error = secpolicy_vnode_access2(cr, ZTOV(dzp),
downer, available_perms, VWRITE|VEXEC);
if (error == 0)
error = zfs_sticky_remove_access(dzp, zp, cr);
return (error);
}
/*
* Determine whether access should be granted/denied, without
* consulting the least-priv subsystem.
*
* The following chart is the recommended NFSv4 enforcement for
* ability to delete an object.
*
* -------------------------------------------------------
* |  Parent Dir   |      Target Object Permissions      |
* |  permissions  |                                     |
* -------------------------------------------------------
* |               | ACL Allows | ACL Denies| Delete     |
* |               |  Delete    |  Delete   | unspecified|
* -------------------------------------------------------
* |  ACL Allows   | Permit     | Permit    | Permit     |
* |  DELETE_CHILD |            |           |            |
* -------------------------------------------------------
* |  ACL Denies   | Permit     | Deny      | Deny       |
* |  DELETE_CHILD |            |           |            |
* -------------------------------------------------------
* | ACL specifies |            |           |            |
* | only allow    | Permit     | Permit    | Permit     |
* | write and     |            |           |            |
* | execute       |            |           |            |
* -------------------------------------------------------
* | ACL denies    |            |           |            |
* | write and     | Permit     | Deny      | Deny       |
* | execute       |            |           |            |
* -------------------------------------------------------
*    ^
*    |
*    No search privilege, can't even look up file?
*
*/
int
zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr)
{
uint32_t dzp_working_mode = 0;
uint32_t zp_working_mode = 0;
int dzp_error, zp_error;
mode_t available_perms;
boolean_t dzpcheck_privs = B_TRUE;
boolean_t zpcheck_privs = B_TRUE;
/*
* We want specific DELETE permissions to
* take precedence over WRITE/EXECUTE. We don't
* want an ACL such as this to mess us up.
* user:joe:write_data:deny,user:joe:delete:allow
*
* However, deny permissions may ultimately be overridden
* by secpolicy_vnode_access().
*
* We will ask for all of the necessary permissions and then
* look at the working modes from the directory and target object
* to determine what was found.
*/
if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
return (SET_ERROR(EPERM));
/*
* First row
* If the directory permissions allow the delete, we are done.
*/
if ((dzp_error = zfs_zaccess_common(dzp, ACE_DELETE_CHILD,
&dzp_working_mode, &dzpcheck_privs, B_FALSE, cr)) == 0)
return (0);
/*
* If target object has delete permission then we are done
*/
if ((zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode,
&zpcheck_privs, B_FALSE, cr)) == 0)
return (0);
ASSERT(dzp_error);
ASSERT(zp_error);
if (!dzpcheck_privs)
return (dzp_error);
if (!zpcheck_privs)
return (zp_error);
/*
* Second row
*
* If directory returns EACCES then delete_child was denied
* due to deny delete_child. In this case send the request through
* secpolicy_vnode_remove(). We don't use zfs_delete_final_check()
* since that *could* allow the delete based on write/execute permission
* and we want delete permissions to override write/execute.
*/
if (dzp_error == EACCES) {
/* XXXPJD: s/dzp/zp/ ? */
return (secpolicy_vnode_remove(ZTOV(dzp), cr));
}
/*
* Third row
* We only need to see if we have write/execute on the directory.
*/
dzp_error = zfs_zaccess_common(dzp, ACE_EXECUTE|ACE_WRITE_DATA,
&dzp_working_mode, &dzpcheck_privs, B_FALSE, cr);
if (dzp_error != 0 && !dzpcheck_privs)
return (dzp_error);
/*
* Fourth row
*/
available_perms = (dzp_working_mode & ACE_WRITE_DATA) ? 0 : VWRITE;
available_perms |= (dzp_working_mode & ACE_EXECUTE) ? 0 : VEXEC;
return (zfs_delete_final_check(zp, dzp, available_perms, cr));
}
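/*
 * A minimal decision-table model of the chart and the code above.  The
 * ZFS_IMMUTABLE/ZFS_NOUNLINK short circuit, the sticky-bit check, and the
 * privilege overrides (secpolicy_vnode_remove()/secpolicy_vnode_access2())
 * are collapsed into a single PRIV_CHECK outcome; the booleans are
 * hypothetical inputs standing in for the zfs_zaccess_common() results.
 */
#include <stdbool.h>

enum del_result { DEL_PERMIT, DEL_PRIV_CHECK };

enum del_result
delete_decision(bool parent_allows_delete_child,
    bool parent_denies_delete_child, bool target_allows_delete,
    bool parent_allows_write_exec)
{
    /* First row: the parent grants DELETE_CHILD. */
    if (parent_allows_delete_child)
        return (DEL_PERMIT);
    /* The target itself grants DELETE. */
    if (target_allows_delete)
        return (DEL_PERMIT);
    /* Second row: an explicit deny of DELETE_CHILD defers to privilege. */
    if (parent_denies_delete_child)
        return (DEL_PRIV_CHECK);
    /* Third/fourth rows: fall back to write+execute on the parent. */
    return (parent_allows_write_exec ? DEL_PERMIT : DEL_PRIV_CHECK);
}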
int
zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
znode_t *tzp, cred_t *cr)
{
int add_perm;
int error;
if (szp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
add_perm = (ZTOV(szp)->v_type == VDIR) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;
/*
* Rename permissions are combination of delete permission +
* add file/subdir permission.
*
* BSD operating systems also require write permission
* on the directory being moved from one parent directory
* to another.
*/
if (ZTOV(szp)->v_type == VDIR && ZTOV(sdzp) != ZTOV(tdzp)) {
if ((error = zfs_zaccess(szp, ACE_WRITE_DATA, 0, B_FALSE, cr)))
return (error);
}
/*
* first make sure we do the delete portion.
*
* If that succeeds then check for add_file/add_subdir permissions
*/
if ((error = zfs_zaccess_delete(sdzp, szp, cr)))
return (error);
/*
* If we have a tzp, see if we can delete it.
*/
if (tzp && (error = zfs_zaccess_delete(tdzp, tzp, cr)))
return (error);
/*
* Now check for add permissions
*/
error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr);
return (error);
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
index a9fe1b647238..3b405e9d68eb 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
@@ -1,1361 +1,1361 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
*/
/*
* ZFS control directory (a.k.a. ".zfs")
*
* This directory provides a common location for all ZFS meta-objects.
* Currently, this is only the 'snapshot' directory, but this may expand in the
* future. The elements are built using the GFS primitives, as the hierarchy
* does not actually exist on disk.
*
* For 'snapshot', we don't want to have all snapshots always mounted, because
* this would take up a huge amount of space in /etc/mnttab. We have three
* types of objects:
*
* ctldir ------> snapshotdir -------> snapshot
* |
* |
* V
* mounted fs
*
* The 'snapshot' node contains just enough information to lookup '..' and act
* as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we
* perform an automount of the underlying filesystem and return the
* corresponding vnode.
*
* All mounts are handled automatically by the kernel, but unmounts are
* (currently) handled from user land. The main reason is that there is no
* reliable way to auto-unmount the filesystem when it's "no longer in use".
* When the user unmounts a filesystem, we call zfsctl_unmount(), which
* unmounts any snapshots within the snapshot directory.
*
* The '.zfs', '.zfs/snapshot', and all directories created under
* '.zfs/snapshot' (i.e., '.zfs/snapshot/<snapname>') are all GFS nodes and
* share the same vfs_t as the head filesystem (what '.zfs' lives under).
*
* File systems mounted on top of the GFS nodes '.zfs/snapshot/<snapname>'
* (i.e., snapshots) are ZFS nodes and have their own unique vfs_t.
* However, vnodes within these mounted on file systems have their v_vfsp
* fields set to the head filesystem to make NFS happy (see
* zfsctl_snapdir_lookup()). We VFS_HOLD the head filesystem's vfs_t
* so that it cannot be freed until all snapshots have been unmounted.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/libkern.h>
#include <sys/dirent.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/dmu.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/mount.h>
#include <sys/zap.h>
#include <sys/sysproto.h>
#include "zfs_namecheck.h"
#include <sys/kernel.h>
#include <sys/ccompat.h>
/* Common access mode for all virtual directories under the ctldir */
const uint16_t zfsctl_ctldir_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
S_IROTH | S_IXOTH;
/*
* "Synthetic" filesystem implementation.
*/
/*
* Assert that A implies B.
*/
#define KASSERT_IMPLY(A, B, msg) KASSERT(!(A) || (B), (msg));
static MALLOC_DEFINE(M_SFSNODES, "sfs_nodes", "synthetic-fs nodes");
typedef struct sfs_node {
char sn_name[ZFS_MAX_DATASET_NAME_LEN];
uint64_t sn_parent_id;
uint64_t sn_id;
} sfs_node_t;
/*
* Check the parent's ID as well as the node's to account for a chance
* that IDs originating from different domains (snapshot IDs, artificial
* IDs, znode IDs) may clash.
*/
static int
sfs_compare_ids(struct vnode *vp, void *arg)
{
sfs_node_t *n1 = vp->v_data;
sfs_node_t *n2 = arg;
bool equal;
equal = n1->sn_id == n2->sn_id &&
n1->sn_parent_id == n2->sn_parent_id;
/* Zero means equality. */
return (!equal);
}
static int
sfs_vnode_get(const struct mount *mp, int flags, uint64_t parent_id,
uint64_t id, struct vnode **vpp)
{
sfs_node_t search;
int err;
search.sn_id = id;
search.sn_parent_id = parent_id;
err = vfs_hash_get(mp, (uint32_t)id, flags, curthread, vpp,
sfs_compare_ids, &search);
return (err);
}
static int
sfs_vnode_insert(struct vnode *vp, int flags, uint64_t parent_id,
uint64_t id, struct vnode **vpp)
{
int err;
KASSERT(vp->v_data != NULL, ("sfs_vnode_insert with NULL v_data"));
err = vfs_hash_insert(vp, (uint32_t)id, flags, curthread, vpp,
sfs_compare_ids, vp->v_data);
return (err);
}
static void
sfs_vnode_remove(struct vnode *vp)
{
vfs_hash_remove(vp);
}
typedef void sfs_vnode_setup_fn(vnode_t *vp, void *arg);
static int
sfs_vgetx(struct mount *mp, int flags, uint64_t parent_id, uint64_t id,
const char *tag, struct vop_vector *vops,
sfs_vnode_setup_fn setup, void *arg,
struct vnode **vpp)
{
struct vnode *vp;
int error;
error = sfs_vnode_get(mp, flags, parent_id, id, vpp);
if (error != 0 || *vpp != NULL) {
KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
"sfs vnode with no data");
return (error);
}
/* Allocate a new vnode/inode. */
error = getnewvnode(tag, mp, vops, &vp);
if (error != 0) {
*vpp = NULL;
return (error);
}
/*
* Exclusively lock the vnode while it's being constructed.
*/
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(vp, mp);
if (error != 0) {
*vpp = NULL;
return (error);
}
setup(vp, arg);
error = sfs_vnode_insert(vp, flags, parent_id, id, vpp);
if (error != 0 || *vpp != NULL) {
KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
"sfs vnode with no data");
return (error);
}
*vpp = vp;
return (0);
}
static void
sfs_print_node(sfs_node_t *node)
{
printf("\tname = %s\n", node->sn_name);
printf("\tparent_id = %ju\n", (uintmax_t)node->sn_parent_id);
printf("\tid = %ju\n", (uintmax_t)node->sn_id);
}
static sfs_node_t *
sfs_alloc_node(size_t size, const char *name, uint64_t parent_id, uint64_t id)
{
struct sfs_node *node;
KASSERT(strlen(name) < sizeof (node->sn_name),
("sfs node name is too long"));
KASSERT(size >= sizeof (*node), ("sfs node size is too small"));
node = malloc(size, M_SFSNODES, M_WAITOK | M_ZERO);
strlcpy(node->sn_name, name, sizeof (node->sn_name));
node->sn_parent_id = parent_id;
node->sn_id = id;
return (node);
}
static void
sfs_destroy_node(sfs_node_t *node)
{
free(node, M_SFSNODES);
}
static void *
sfs_reclaim_vnode(vnode_t *vp)
{
void *data;
sfs_vnode_remove(vp);
data = vp->v_data;
vp->v_data = NULL;
return (data);
}
static int
sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap,
zfs_uio_t *uio, off_t *offp)
{
struct dirent entry;
int error;
/* Reset ncookies for subsequent use of vfs_read_dirent. */
if (ap->a_ncookies != NULL)
*ap->a_ncookies = 0;
if (zfs_uio_resid(uio) < sizeof (entry))
return (SET_ERROR(EINVAL));
if (zfs_uio_offset(uio) < 0)
return (SET_ERROR(EINVAL));
if (zfs_uio_offset(uio) == 0) {
entry.d_fileno = id;
entry.d_type = DT_DIR;
entry.d_name[0] = '.';
entry.d_name[1] = '\0';
entry.d_namlen = 1;
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio));
if (error != 0)
return (SET_ERROR(error));
}
if (zfs_uio_offset(uio) < sizeof (entry))
return (SET_ERROR(EINVAL));
if (zfs_uio_offset(uio) == sizeof (entry)) {
entry.d_fileno = parent_id;
entry.d_type = DT_DIR;
entry.d_name[0] = '.';
entry.d_name[1] = '.';
entry.d_name[2] = '\0';
entry.d_namlen = 2;
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio));
if (error != 0)
return (SET_ERROR(error));
}
if (offp != NULL)
*offp = 2 * sizeof (entry);
return (0);
}
/*
* .zfs inode namespace
*
* We need to generate unique inode numbers for all files and directories
* within the .zfs pseudo-filesystem. We use the following scheme:
*
* ENTRY ZFSCTL_INODE
* .zfs 1
* .zfs/snapshot 2
* .zfs/snapshot/<snap> objectid(snap)
*/
#define ZFSCTL_INO_SNAP(id) (id)
static struct vop_vector zfsctl_ops_root;
static struct vop_vector zfsctl_ops_snapdir;
static struct vop_vector zfsctl_ops_snapshot;
void
zfsctl_init(void)
{
}
void
zfsctl_fini(void)
{
}
boolean_t
zfsctl_is_node(vnode_t *vp)
{
return (vn_matchops(vp, zfsctl_ops_root) ||
vn_matchops(vp, zfsctl_ops_snapdir) ||
vn_matchops(vp, zfsctl_ops_snapshot));
}
typedef struct zfsctl_root {
sfs_node_t node;
sfs_node_t *snapdir;
timestruc_t cmtime;
} zfsctl_root_t;
/*
* Create the '.zfs' directory.
*/
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
zfsctl_root_t *dot_zfs;
sfs_node_t *snapdir;
vnode_t *rvp;
uint64_t crtime[2];
ASSERT3P(zfsvfs->z_ctldir, ==, NULL);
snapdir = sfs_alloc_node(sizeof (*snapdir), "snapshot", ZFSCTL_INO_ROOT,
ZFSCTL_INO_SNAPDIR);
dot_zfs = (zfsctl_root_t *)sfs_alloc_node(sizeof (*dot_zfs), ".zfs", 0,
ZFSCTL_INO_ROOT);
dot_zfs->snapdir = snapdir;
VERIFY0(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp));
VERIFY0(sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
&crtime, sizeof (crtime)));
ZFS_TIME_DECODE(&dot_zfs->cmtime, crtime);
vput(rvp);
zfsvfs->z_ctldir = dot_zfs;
}
/*
* Destroy the '.zfs' directory. Only called when the filesystem is unmounted.
* The nodes must not have any associated vnodes by now as they should be
* vflush-ed.
*/
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
sfs_destroy_node(zfsvfs->z_ctldir->snapdir);
sfs_destroy_node((sfs_node_t *)zfsvfs->z_ctldir);
zfsvfs->z_ctldir = NULL;
}
static int
zfsctl_fs_root_vnode(struct mount *mp, void *arg __unused, int flags,
struct vnode **vpp)
{
return (VFS_ROOT(mp, flags, vpp));
}
static void
zfsctl_common_vnode_setup(vnode_t *vp, void *arg)
{
ASSERT_VOP_ELOCKED(vp, __func__);
/* We support shared locking. */
VN_LOCK_ASHARE(vp);
vp->v_type = VDIR;
vp->v_data = arg;
}
static int
zfsctl_root_vnode(struct mount *mp, void *arg __unused, int flags,
struct vnode **vpp)
{
void *node;
int err;
node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir;
err = sfs_vgetx(mp, flags, 0, ZFSCTL_INO_ROOT, "zfs", &zfsctl_ops_root,
zfsctl_common_vnode_setup, node, vpp);
return (err);
}
static int
zfsctl_snapdir_vnode(struct mount *mp, void *arg __unused, int flags,
struct vnode **vpp)
{
void *node;
int err;
node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir->snapdir;
err = sfs_vgetx(mp, flags, ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, "zfs",
&zfsctl_ops_snapdir, zfsctl_common_vnode_setup, node, vpp);
return (err);
}
/*
* Given a root znode, retrieve the associated .zfs directory.
* Add a hold to the vnode and return it.
*/
int
zfsctl_root(zfsvfs_t *zfsvfs, int flags, vnode_t **vpp)
{
int error;
error = zfsctl_root_vnode(zfsvfs->z_vfs, NULL, flags, vpp);
return (error);
}
/*
* Common open routine. Disallow any write access.
*/
static int
zfsctl_common_open(struct vop_open_args *ap)
{
int flags = ap->a_mode;
if (flags & FWRITE)
return (SET_ERROR(EACCES));
return (0);
}
/*
* Common close routine. Nothing to do here.
*/
/* ARGSUSED */
static int
zfsctl_common_close(struct vop_close_args *ap)
{
return (0);
}
/*
* Common access routine. Disallow writes.
*/
static int
zfsctl_common_access(struct vop_access_args *ap)
{
accmode_t accmode = ap->a_accmode;
if (accmode & VWRITE)
return (SET_ERROR(EACCES));
return (0);
}
/*
* Common getattr function. Fill in basic information.
*/
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
timestruc_t now;
sfs_node_t *node;
node = vp->v_data;
vap->va_uid = 0;
vap->va_gid = 0;
vap->va_rdev = 0;
/*
* We are a purely virtual object, so we have no
* blocksize or allocated blocks.
*/
vap->va_blksize = 0;
vap->va_nblocks = 0;
vap->va_seq = 0;
vn_fsid(vp, vap);
vap->va_mode = zfsctl_ctldir_mode;
vap->va_type = VDIR;
/*
* We live in the now (for atime).
*/
gethrestime(&now);
vap->va_atime = now;
/* FreeBSD: Reset chflags(2) flags. */
vap->va_flags = 0;
vap->va_nodeid = node->sn_id;
/* At least '.' and '..'. */
vap->va_nlink = 2;
}
#ifndef _OPENSOLARIS_SYS_VNODE_H_
struct vop_fid_args {
struct vnode *a_vp;
struct fid *a_fid;
};
#endif
static int
zfsctl_common_fid(struct vop_fid_args *ap)
{
vnode_t *vp = ap->a_vp;
fid_t *fidp = (void *)ap->a_fid;
sfs_node_t *node = vp->v_data;
uint64_t object = node->sn_id;
zfid_short_t *zfid;
int i;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = SHORT_FID_LEN;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* .zfs nodes always have a generation number of 0 */
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = 0;
return (0);
}
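/*
 * A minimal sketch of the byte packing used in zfsctl_common_fid() above:
 * the 64-bit node id is stored least-significant byte first in the fid's
 * object field, and a synthetic .zfs node always gets generation 0.
 * Plain arrays stand in for the real zfid_short_t layout, and the width
 * below is a stand-in for sizeof (zfid->zf_object).
 */
#include <stdint.h>

#define SK_OBJ_BYTES 6

void
pack_object(uint64_t object, uint8_t out[SK_OBJ_BYTES])
{
    for (int i = 0; i < SK_OBJ_BYTES; i++)
        out[i] = (uint8_t)(object >> (8 * i));
}

/* Round trip: unpack_object() after pack_object(0x1234, buf) yields 0x1234. */
uint64_t
unpack_object(const uint8_t in[SK_OBJ_BYTES])
{
    uint64_t object = 0;

    for (int i = 0; i < SK_OBJ_BYTES; i++)
        object |= (uint64_t)in[i] << (8 * i);
    return (object);
}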
#ifndef _SYS_SYSPROTO_H_
struct vop_reclaim_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfsctl_common_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
(void) sfs_reclaim_vnode(vp);
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_print_args {
struct vnode *a_vp;
};
#endif
static int
zfsctl_common_print(struct vop_print_args *ap)
{
sfs_print_node(ap->a_vp->v_data);
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
/*
* Get root directory attributes.
*/
static int
zfsctl_root_getattr(struct vop_getattr_args *ap)
{
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
zfsctl_root_t *node = vp->v_data;
zfsctl_common_getattr(vp, vap);
vap->va_ctime = node->cmtime;
vap->va_mtime = vap->va_ctime;
vap->va_birthtime = vap->va_ctime;
vap->va_nlink += 1; /* snapdir */
vap->va_size = vap->va_nlink;
return (0);
}
/*
* When we lookup "." we still can be asked to lock it
* differently, can't we?
*/
static int
zfsctl_relock_dot(vnode_t *dvp, int ltype)
{
vref(dvp);
if (ltype != VOP_ISLOCKED(dvp)) {
if (ltype == LK_EXCLUSIVE)
vn_lock(dvp, LK_UPGRADE | LK_RETRY);
else /* if (ltype == LK_SHARED) */
vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
/* Relocking for the "." case may have left us with a reclaimed vnode. */
if (VN_IS_DOOMED(dvp)) {
vrele(dvp);
return (SET_ERROR(ENOENT));
}
}
return (0);
}
/*
* Special case the handling of "..".
*/
static int
zfsctl_root_lookup(struct vop_lookup_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vnode_t *dvp = ap->a_dvp;
vnode_t **vpp = ap->a_vpp;
int flags = ap->a_cnp->cn_flags;
int lkflags = ap->a_cnp->cn_lkflags;
int nameiop = ap->a_cnp->cn_nameiop;
int err;
ASSERT3S(dvp->v_type, ==, VDIR);
if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
return (SET_ERROR(ENOTSUP));
if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
if (err == 0)
*vpp = dvp;
} else if ((flags & ISDOTDOT) != 0) {
err = vn_vget_ino_gen(dvp, zfsctl_fs_root_vnode, NULL,
lkflags, vpp);
} else if (strncmp(cnp->cn_nameptr, "snapshot", cnp->cn_namelen) == 0) {
err = zfsctl_snapdir_vnode(dvp->v_mount, NULL, lkflags, vpp);
} else {
err = SET_ERROR(ENOENT);
}
if (err != 0)
*vpp = NULL;
return (err);
}
static int
zfsctl_root_readdir(struct vop_readdir_args *ap)
{
struct dirent entry;
vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
zfsctl_root_t *node = vp->v_data;
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
int error;
zfs_uio_init(&uio, ap->a_uio);
ASSERT3S(vp->v_type, ==, VDIR);
error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio,
&dots_offset);
if (error != 0) {
if (error == ENAMETOOLONG) /* ran out of destination space */
error = 0;
return (error);
}
if (zfs_uio_offset(&uio) != dots_offset)
return (SET_ERROR(EINVAL));
CTASSERT(sizeof (node->snapdir->sn_name) <= sizeof (entry.d_name));
entry.d_fileno = node->snapdir->sn_id;
entry.d_type = DT_DIR;
strcpy(entry.d_name, node->snapdir->sn_name);
entry.d_namlen = strlen(entry.d_name);
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
if (error == ENAMETOOLONG)
error = 0;
return (SET_ERROR(error));
}
if (eofp != NULL)
*eofp = 1;
return (0);
}
static int
zfsctl_root_vptocnp(struct vop_vptocnp_args *ap)
{
static const char dotzfs_name[4] = ".zfs";
vnode_t *dvp;
int error;
if (*ap->a_buflen < sizeof (dotzfs_name))
return (SET_ERROR(ENOMEM));
error = vn_vget_ino_gen(ap->a_vp, zfsctl_fs_root_vnode, NULL,
LK_SHARED, &dvp);
if (error != 0)
return (SET_ERROR(error));
VOP_UNLOCK1(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= sizeof (dotzfs_name);
bcopy(dotzfs_name, ap->a_buf + *ap->a_buflen, sizeof (dotzfs_name));
return (0);
}
static int
zfsctl_common_pathconf(struct vop_pathconf_args *ap)
{
/*
* We care about ACL variables so that user land utilities like ls
* can display them correctly. Since the ctldir's st_dev is set to be
* the same as the parent dataset, we must support all variables that
* it supports.
*/
switch (ap->a_name) {
case _PC_LINK_MAX:
*ap->a_retval = MIN(LONG_MAX, ZFS_LINK_MAX);
return (0);
case _PC_FILESIZEBITS:
*ap->a_retval = 64;
return (0);
case _PC_MIN_HOLE_SIZE:
*ap->a_retval = (int)SPA_MINBLOCKSIZE;
return (0);
case _PC_ACL_EXTENDED:
*ap->a_retval = 0;
return (0);
case _PC_ACL_NFS4:
*ap->a_retval = 1;
return (0);
case _PC_ACL_PATH_MAX:
*ap->a_retval = ACL_MAX_ENTRIES;
return (0);
case _PC_NAME_MAX:
*ap->a_retval = NAME_MAX;
return (0);
default:
return (vop_stdpathconf(ap));
}
}
/*
* Returns a trivial ACL
*/
static int
zfsctl_common_getacl(struct vop_getacl_args *ap)
{
int i;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
acl_nfs4_sync_acl_from_mode(ap->a_aclp, zfsctl_ctldir_mode, 0);
/*
* acl_nfs4_sync_acl_from_mode assumes that the owner can always modify
* attributes. That is not the case for the ctldir, so we must clear
* those bits. We also must clear ACL_READ_NAMED_ATTRS, because xattrs
* aren't supported by the ctldir.
*/
for (i = 0; i < ap->a_aclp->acl_cnt; i++) {
struct acl_entry *entry;
entry = &(ap->a_aclp->acl_entry[i]);
entry->ae_perm &= ~(ACL_WRITE_ACL | ACL_WRITE_OWNER |
ACL_WRITE_ATTRIBUTES | ACL_WRITE_NAMED_ATTRS |
ACL_READ_NAMED_ATTRS);
}
return (0);
}
static struct vop_vector zfsctl_ops_root = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
.vop_open = zfsctl_common_open,
.vop_close = zfsctl_common_close,
.vop_ioctl = VOP_EINVAL,
.vop_getattr = zfsctl_root_getattr,
.vop_access = zfsctl_common_access,
.vop_readdir = zfsctl_root_readdir,
.vop_lookup = zfsctl_root_lookup,
.vop_inactive = VOP_NULL,
.vop_reclaim = zfsctl_common_reclaim,
.vop_fid = zfsctl_common_fid,
.vop_print = zfsctl_common_print,
.vop_vptocnp = zfsctl_root_vptocnp,
.vop_pathconf = zfsctl_common_pathconf,
.vop_getacl = zfsctl_common_getacl,
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_root);
static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
dmu_objset_name(os, zname);
if (strlen(zname) + 1 + strlen(name) >= len)
return (SET_ERROR(ENAMETOOLONG));
(void) strcat(zname, "@");
(void) strcat(zname, name);
return (0);
}
static int
zfsctl_snapshot_lookup(vnode_t *vp, const char *name, uint64_t *id)
{
objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
int err;
err = dsl_dataset_snap_lookup(dmu_objset_ds(os), name, id);
return (err);
}
/*
* Given a vnode get a root vnode of a filesystem mounted on top of
* the vnode, if any. The root vnode is referenced and locked.
* If no filesystem is mounted then the original vnode remains referenced
* and locked. If any error happens the original vnode is unlocked and
* released.
*/
static int
zfsctl_mounted_here(vnode_t **vpp, int flags)
{
struct mount *mp;
int err;
ASSERT_VOP_LOCKED(*vpp, __func__);
ASSERT3S((*vpp)->v_type, ==, VDIR);
if ((mp = (*vpp)->v_mountedhere) != NULL) {
err = vfs_busy(mp, 0);
KASSERT(err == 0, ("vfs_busy(mp, 0) failed with %d", err));
KASSERT(vrefcnt(*vpp) > 1, ("unreferenced mountpoint"));
vput(*vpp);
err = VFS_ROOT(mp, flags, vpp);
vfs_unbusy(mp);
return (err);
}
return (EJUSTRETURN);
}
typedef struct {
const char *snap_name;
uint64_t snap_id;
} snapshot_setup_arg_t;
static void
zfsctl_snapshot_vnode_setup(vnode_t *vp, void *arg)
{
snapshot_setup_arg_t *ssa = arg;
sfs_node_t *node;
ASSERT_VOP_ELOCKED(vp, __func__);
node = sfs_alloc_node(sizeof (sfs_node_t),
ssa->snap_name, ZFSCTL_INO_SNAPDIR, ssa->snap_id);
zfsctl_common_vnode_setup(vp, node);
/* We have to support recursive locking. */
VN_LOCK_AREC(vp);
}
/*
* Lookup entry point for the 'snapshot' directory. Try to open the
* snapshot if it exists, creating the pseudo filesystem vnode as necessary.
* Perform a mount of the associated dataset on top of the vnode.
* There are four possibilities:
* - the snapshot node and vnode do not exist
* - the snapshot vnode is covered by the mounted snapshot
* - the snapshot vnode is not covered yet, the mount operation is in progress
* - the snapshot vnode is not covered, because the snapshot has been unmounted
* The last two states are transient and should be relatively short-lived.
*/
static int
zfsctl_snapdir_lookup(struct vop_lookup_args *ap)
{
vnode_t *dvp = ap->a_dvp;
vnode_t **vpp = ap->a_vpp;
struct componentname *cnp = ap->a_cnp;
char name[NAME_MAX + 1];
char fullname[ZFS_MAX_DATASET_NAME_LEN];
char *mountpoint;
size_t mountpoint_len;
zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
uint64_t snap_id;
int nameiop = cnp->cn_nameiop;
int lkflags = cnp->cn_lkflags;
int flags = cnp->cn_flags;
int err;
ASSERT3S(dvp->v_type, ==, VDIR);
if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
return (SET_ERROR(ENOTSUP));
if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
if (err == 0)
*vpp = dvp;
return (err);
}
if (flags & ISDOTDOT) {
err = vn_vget_ino_gen(dvp, zfsctl_root_vnode, NULL, lkflags,
vpp);
return (err);
}
if (cnp->cn_namelen >= sizeof (name))
return (SET_ERROR(ENAMETOOLONG));
strlcpy(name, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);
err = zfsctl_snapshot_lookup(dvp, name, &snap_id);
if (err != 0)
return (SET_ERROR(ENOENT));
for (;;) {
snapshot_setup_arg_t ssa;
ssa.snap_name = name;
ssa.snap_id = snap_id;
err = sfs_vgetx(dvp->v_mount, LK_SHARED, ZFSCTL_INO_SNAPDIR,
snap_id, "zfs", &zfsctl_ops_snapshot,
zfsctl_snapshot_vnode_setup, &ssa, vpp);
if (err != 0)
return (err);
/* Check if a new vnode has just been created. */
if (VOP_ISLOCKED(*vpp) == LK_EXCLUSIVE)
break;
/*
* Check if a snapshot is already mounted on top of the vnode.
*/
err = zfsctl_mounted_here(vpp, lkflags);
if (err != EJUSTRETURN)
return (err);
/*
* If the vnode is not covered, then either the mount operation
* is in progress or the snapshot has already been unmounted
* but the vnode hasn't been inactivated and reclaimed yet.
* We can try to re-use the vnode in the latter case.
*/
VI_LOCK(*vpp);
if (((*vpp)->v_iflag & VI_MOUNT) == 0) {
/*
* Upgrade to exclusive lock in order to:
* - avoid race conditions
* - satisfy the contract of mount_snapshot()
*/
err = VOP_LOCK(*vpp, LK_TRYUPGRADE | LK_INTERLOCK);
if (err == 0)
break;
} else {
VI_UNLOCK(*vpp);
}
/*
* In this state we can loop on uncontested locks and starve
* the thread doing the lengthy, non-trivial mount operation.
* So, yield to prevent that from happening.
*/
vput(*vpp);
kern_yield(PRI_USER);
}
VERIFY0(zfsctl_snapshot_zname(dvp, name, sizeof (fullname), fullname));
mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
strlen("/" ZFS_CTLDIR_NAME "/snapshot/") + strlen(name) + 1;
mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
(void) snprintf(mountpoint, mountpoint_len,
"%s/" ZFS_CTLDIR_NAME "/snapshot/%s",
dvp->v_vfsp->mnt_stat.f_mntonname, name);
err = mount_snapshot(curthread, vpp, "zfs", mountpoint, fullname, 0);
kmem_free(mountpoint, mountpoint_len);
if (err == 0) {
/*
* Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
*
* This is where we lie about our v_vfsp in order to
* make .zfs/snapshot/<snapname> accessible over NFS
* without requiring manual mounts of <snapname>.
*/
ASSERT3P(VTOZ(*vpp)->z_zfsvfs, !=, zfsvfs);
VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
/* Clear the root flag (set via VFS_ROOT) as well. */
(*vpp)->v_vflag &= ~VV_ROOT;
}
if (err != 0)
*vpp = NULL;
return (err);
}
static int
zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
struct dirent entry;
vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
int error;
zfs_uio_init(&uio, ap->a_uio);
ASSERT3S(vp->v_type, ==, VDIR);
error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap,
&uio, &dots_offset);
if (error != 0) {
if (error == ENAMETOOLONG) /* ran out of destination space */
error = 0;
return (error);
}
ZFS_ENTER(zfsvfs);
for (;;) {
uint64_t cookie;
uint64_t id;
cookie = zfs_uio_offset(&uio) - dots_offset;
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
snapname, &id, &cookie, NULL);
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error != 0) {
if (error == ENOENT) {
if (eofp != NULL)
*eofp = 1;
error = 0;
}
ZFS_EXIT(zfsvfs);
return (error);
}
entry.d_fileno = id;
entry.d_type = DT_DIR;
strcpy(entry.d_name, snapname);
entry.d_namlen = strlen(entry.d_name);
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
if (error == ENAMETOOLONG)
error = 0;
ZFS_EXIT(zfsvfs);
return (SET_ERROR(error));
}
zfs_uio_setoffset(&uio, cookie + dots_offset);
}
- /* NOTREACHED */
+ __builtin_unreachable();
}
static int
zfsctl_snapdir_getattr(struct vop_getattr_args *ap)
{
vnode_t *vp = ap->a_vp;
vattr_t *vap = ap->a_vap;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
dsl_dataset_t *ds;
uint64_t snap_count;
int err;
ZFS_ENTER(zfsvfs);
ds = dmu_objset_ds(zfsvfs->z_os);
zfsctl_common_getattr(vp, vap);
vap->va_ctime = dmu_objset_snap_cmtime(zfsvfs->z_os);
vap->va_mtime = vap->va_ctime;
vap->va_birthtime = vap->va_ctime;
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
err = zap_count(dmu_objset_pool(ds->ds_objset)->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
if (err != 0) {
ZFS_EXIT(zfsvfs);
return (err);
}
vap->va_nlink += snap_count;
}
vap->va_size = vap->va_nlink;
ZFS_EXIT(zfsvfs);
return (0);
}
static struct vop_vector zfsctl_ops_snapdir = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
.vop_open = zfsctl_common_open,
.vop_close = zfsctl_common_close,
.vop_getattr = zfsctl_snapdir_getattr,
.vop_access = zfsctl_common_access,
.vop_readdir = zfsctl_snapdir_readdir,
.vop_lookup = zfsctl_snapdir_lookup,
.vop_reclaim = zfsctl_common_reclaim,
.vop_fid = zfsctl_common_fid,
.vop_print = zfsctl_common_print,
.vop_pathconf = zfsctl_common_pathconf,
.vop_getacl = zfsctl_common_getacl,
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapdir);
static int
zfsctl_snapshot_inactive(struct vop_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
VERIFY3S(vrecycle(vp), ==, 1);
return (0);
}
static int
zfsctl_snapshot_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
void *data = vp->v_data;
sfs_reclaim_vnode(vp);
sfs_destroy_node(data);
return (0);
}
static int
zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
{
struct mount *mp;
vnode_t *dvp;
vnode_t *vp;
sfs_node_t *node;
size_t len;
int locked;
int error;
vp = ap->a_vp;
node = vp->v_data;
len = strlen(node->sn_name);
if (*ap->a_buflen < len)
return (SET_ERROR(ENOMEM));
/*
* Prevent unmounting of the snapshot while the vnode lock
* is not held. That is not strictly required, but allows
* us to assert that an uncovered snapshot vnode is never
* "leaked".
*/
mp = vp->v_mountedhere;
if (mp == NULL)
return (SET_ERROR(ENOENT));
error = vfs_busy(mp, 0);
KASSERT(error == 0, ("vfs_busy(mp, 0) failed with %d", error));
/*
* We can vput the vnode as we can now depend on the reference owned
* by the busied mp. But we also need to hold the vnode, because
* the reference may go away after vfs_unbusy(), which has to be called
* before we can lock the vnode again.
*/
locked = VOP_ISLOCKED(vp);
#if __FreeBSD_version >= 1300045
enum vgetstate vs = vget_prep(vp);
#else
vhold(vp);
#endif
vput(vp);
/* Look up .zfs/snapshot, our parent. */
error = zfsctl_snapdir_vnode(vp->v_mount, NULL, LK_SHARED, &dvp);
if (error == 0) {
VOP_UNLOCK1(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= len;
bcopy(node->sn_name, ap->a_buf + *ap->a_buflen, len);
}
vfs_unbusy(mp);
#if __FreeBSD_version >= 1300045
vget_finish(vp, locked | LK_RETRY, vs);
#else
vget(vp, locked | LK_VNHELD | LK_RETRY, curthread);
#endif
return (error);
}
/*
* These VP's should never see the light of day. They should always
* be covered.
*/
static struct vop_vector zfsctl_ops_snapshot = {
.vop_default = NULL, /* ensure very restricted access */
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
.vop_inactive = zfsctl_snapshot_inactive,
#if __FreeBSD_version >= 1300045
.vop_need_inactive = vop_stdneed_inactive,
#endif
.vop_reclaim = zfsctl_snapshot_reclaim,
.vop_vptocnp = zfsctl_snapshot_vptocnp,
.vop_lock1 = vop_stdlock,
.vop_unlock = vop_stdunlock,
.vop_islocked = vop_stdislocked,
.vop_advlockpurge = vop_stdadvlockpurge, /* called by vgone */
.vop_print = zfsctl_common_print,
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapshot);
int
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
{
zfsvfs_t *zfsvfs __unused = vfsp->vfs_data;
vnode_t *vp;
int error;
ASSERT3P(zfsvfs->z_ctldir, !=, NULL);
*zfsvfsp = NULL;
error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
ZFSCTL_INO_SNAPDIR, objsetid, &vp);
if (error == 0 && vp != NULL) {
/*
* XXX Probably need to at least reference, if not busy, the mp.
*/
if (vp->v_mountedhere != NULL)
*zfsvfsp = vp->v_mountedhere->mnt_data;
vput(vp);
}
if (*zfsvfsp == NULL)
return (SET_ERROR(EINVAL));
return (0);
}
/*
* Unmount any snapshots for the given filesystem. This is called from
* zfs_umount() - if we have a ctldir, then go through and unmount all the
* snapshots.
*/
int
zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
zfsvfs_t *zfsvfs = vfsp->vfs_data;
struct mount *mp;
vnode_t *vp;
uint64_t cookie;
int error;
ASSERT3P(zfsvfs->z_ctldir, !=, NULL);
cookie = 0;
for (;;) {
uint64_t id;
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
snapname, &id, &cookie, NULL);
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error != 0) {
if (error == ENOENT)
error = 0;
break;
}
for (;;) {
error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
ZFSCTL_INO_SNAPDIR, id, &vp);
if (error != 0 || vp == NULL)
break;
mp = vp->v_mountedhere;
/*
* v_mountedhere being NULL means that the
* (uncovered) vnode is in a transient state
* (mounting or unmounting), so loop until it
* settles down.
*/
if (mp != NULL)
break;
vput(vp);
}
if (error != 0)
break;
if (vp == NULL)
continue; /* no mountpoint, nothing to do */
/*
* The mount-point vnode is kept locked to avoid spurious EBUSY
* from a concurrent umount.
* The vnode lock must have recursive locking enabled.
*/
vfs_ref(mp);
error = dounmount(mp, fflags, curthread);
KASSERT_IMPLY(error == 0, vrefcnt(vp) == 1,
("extra references after unmount"));
vput(vp);
if (error != 0)
break;
}
KASSERT_IMPLY((fflags & MS_FORCE) != 0, error == 0,
("force unmounting failed"));
return (error);
}
int
zfsctl_snapshot_unmount(const char *snapname, int flags __unused)
{
vfs_t *vfsp = NULL;
zfsvfs_t *zfsvfs = NULL;
if (strchr(snapname, '@') == NULL)
return (0);
int err = getzfsvfs(snapname, &zfsvfs);
if (err != 0) {
ASSERT3P(zfsvfs, ==, NULL);
return (0);
}
vfsp = zfsvfs->z_vfs;
ASSERT(!dsl_pool_config_held(dmu_objset_pool(zfsvfs->z_os)));
vfs_ref(vfsp);
vfs_unbusy(vfsp);
return (dounmount(vfsp, MS_FORCE, curthread));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
index aeb42b304e73..832378a92a8d 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
@@ -1,1826 +1,1824 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Datto, Inc. All rights reserved.
*/
#include <sys/zio_crypt.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/sha2.h>
#include <sys/hkdf.h>
/*
* This file is responsible for handling all of the details of generating
* encryption parameters and performing encryption and authentication.
*
* BLOCK ENCRYPTION PARAMETERS:
* Encryption / Authentication Algorithm Suite (crypt):
* The encryption algorithm, mode, and key length we are going to use. We
* currently support AES in either GCM or CCM modes with 128, 192, and 256 bit
* keys. All authentication is currently done with SHA512-HMAC.
*
* Plaintext:
* The unencrypted data that we want to encrypt.
*
* Initialization Vector (IV):
* An initialization vector for the encryption algorithms. This is used to
* "tweak" the encryption algorithms so that two blocks of the same data are
* encrypted into different ciphertext outputs, thus obfuscating block patterns.
* The supported encryption modes (AES-GCM and AES-CCM) require that an IV is
* never reused with the same encryption key. This value is stored unencrypted
* and must simply be provided to the decryption function. We use a 96 bit IV
* (as recommended by NIST) for all block encryption. For non-dedup blocks we
* derive the IV randomly. The first 64 bits of the IV are stored in the second
* word of DVA[2] and the remaining 32 bits are stored in the upper 32 bits of
* blk_fill. This is safe because encrypted blocks can't use the upper 32 bits
* of blk_fill. We only encrypt level 0 blocks, which normally have a fill count
* of 1. The only exception is for DMU_OT_DNODE objects, where the fill count of
* level 0 blocks is the number of allocated dnodes in that block. The on-disk
* format supports at most 2^15 slots per L0 dnode block, because the maximum
* block size is 16MB (2^24). In either case, for level 0 blocks this number
* will still be smaller than UINT32_MAX so it is safe to store the IV in the
* top 32 bits of blk_fill, while leaving the bottom 32 bits of the fill count
* for the dnode code.
*
* Master key:
* This is the most important secret data of an encrypted dataset. It is used
* along with the salt to generate the actual encryption keys via HKDF. We
* do not use the master key to directly encrypt any data because there are
* theoretical limits on how much data can actually be safely encrypted with
* any encryption mode. The master key is stored encrypted on disk with the
* user's wrapping key. Its length is determined by the encryption algorithm.
* For details on how this is stored see the block comment in dsl_crypt.c
*
* Salt:
* Used as an input to the HKDF function, along with the master key. We use a
* 64 bit salt, stored unencrypted in the first word of DVA[2]. Any given salt
* can be used for encrypting many blocks, so we cache the current salt and the
* associated derived key in zio_crypt_t so we do not need to derive it again
* needlessly.
*
* Encryption Key:
* A secret binary key, generated from an HKDF function used to encrypt and
* decrypt data.
*
* Message Authentication Code (MAC):
* The MAC is an output of authenticated encryption modes such as AES-GCM and
* AES-CCM. Its purpose is to ensure that an attacker cannot modify encrypted
* data on disk and return garbage to the application. Effectively, it is a
* checksum that can not be reproduced by an attacker. We store the MAC in the
* second 128 bits of blk_cksum, leaving the first 128 bits for a truncated
* regular checksum of the ciphertext which can be used for scrubbing.
*
* OBJECT AUTHENTICATION:
* Some object types, such as DMU_OT_MASTER_NODE cannot be encrypted because
* they contain some info that always needs to be readable. To prevent this
* data from being altered, we authenticate this data using SHA512-HMAC. This
* will produce a MAC (similar to the one produced via encryption) which can
* be used to verify the object was not modified. HMACs do not require key
* rotation or IVs, so we can keep up to the full 3 copies of authenticated
* data.
*
* ZIL ENCRYPTION:
* ZIL blocks have their bp written to disk ahead of the associated data, so we
* cannot store the MAC there as we normally do. For these blocks the MAC is
* stored in the embedded checksum within the zil_chain_t header. The salt and
* IV are generated for the block on bp allocation instead of at encryption
* time. In addition, ZIL blocks have some pieces that must be left in plaintext
* for claiming even though all of the sensitive user data still needs to be
* encrypted. The function zio_crypt_init_uios_zil() handles parsing which
* pieces of the block need to be encrypted. All data that is not encrypted is
* authenticated using the AAD mechanisms that the supported encryption modes
* provide for. In order to preserve the semantics of the ZIL for encrypted
* datasets, the ZIL is not protected at the objset level as described below.
*
* DNODE ENCRYPTION:
* Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
* in plaintext for scrubbing and claiming, but the bonus buffers might contain
* sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
* which pieces of the block need to be encrypted. For more details about
* dnode authentication and encryption, see zio_crypt_init_uios_dnode().
*
* OBJECT SET AUTHENTICATION:
* Up to this point, everything we have encrypted and authenticated has been
* at level 0 (or -2 for the ZIL). If we did not do any further work the
* on-disk format would be susceptible to attacks that deleted or rearranged
* the order of level 0 blocks. Ideally, the cleanest solution would be to
* maintain a tree of authentication MACs going up the bp tree. However, this
* presents a problem for raw sends. Send files do not send information about
* indirect blocks so there would be no convenient way to transfer the MACs and
* they cannot be recalculated on the receive side without the master key which
* would defeat one of the purposes of raw sends in the first place. Instead,
* for the indirect levels of the bp tree, we use a regular SHA512 of the MACs
* from the level below. We also include some portable fields from blk_prop such
* as the lsize and compression algorithm to prevent the data from being
* misinterpreted.
*
* At the objset level, we maintain 2 separate 256 bit MACs in the
* objset_phys_t. The first one is "portable" and is the logical root of the
* MAC tree maintained in the metadnode's bps. The second is "local" and is
* used as the root MAC for the user accounting objects, which are also not
* transferred via "zfs send". The portable MAC is sent in the DRR_BEGIN payload
* of the send file. The useraccounting code ensures that the useraccounting
* info is not present upon a receive, so the local MAC can simply be cleared
* out at that time. For more info about objset_phys_t authentication, see
* zio_crypt_do_objset_hmacs().
*
* CONSIDERATIONS FOR DEDUP:
* In order for dedup to work, blocks that we want to dedup with one another
* need to use the same IV and encryption key, so that they will have the same
* ciphertext. Normally, one should never reuse an IV with the same encryption
* key or else AES-GCM and AES-CCM can both actually leak the plaintext of both
* blocks. In this case, however, since we are using the same plaintext as
* well, all that we end up with is a duplicate of the original ciphertext we
* already had. As a result, an attacker with read access to the raw disk will
* be able to tell which blocks are the same but this information is given away
* by dedup anyway. In order to get the same IVs and encryption keys for
* equivalent blocks of data we use an HMAC of the plaintext. We use an HMAC
* here so that a reproducible checksum of the plaintext is never available to
* the attacker. The HMAC key is kept alongside the master key, encrypted on
* disk. The first 64 bits of the HMAC are used in place of the random salt, and
* the next 96 bits are used as the IV. As a result of this mechanism, dedup
* will only work within a clone family since encrypted dedup requires use of
* the same master and HMAC keys.
*/
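/*
 * For quick reference, the parameter placement described above (as
 * implemented by zio_crypt_encode_params_bp() and zio_crypt_encode_mac_bp()
 * further below) works out to:
 *
 * salt (64 bits) -> blk_dva[2].dva_word[0]
 * IV bits 0..63 -> blk_dva[2].dva_word[1]
 * IV bits 64..95 -> upper 32 bits of blk_fill (BP_GET_IV2 / BP_SET_IV2)
 * MAC (128 bits) -> blk_cksum.zc_word[2] and zc_word[3]
 */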
/*
* After encrypting many blocks with the same key we may start to run up
* against the theoretical limits of how much data can securely be encrypted
* with a single key using the supported encryption modes. The most obvious
* limitation is that our risk of generating 2 equivalent 96 bit IVs increases
* the more IVs we generate (which both GCM and CCM modes strictly forbid).
* This risk actually grows surprisingly quickly over time according to the
* Birthday Problem. With a total IV space of 2^(96 bits), and assuming we have
* generated n IVs with a cryptographically secure RNG, the approximate
* probability p(n) of a collision is given as:
*
* p(n) ~= 1 - e^(-n*(n-1)/(2*(2^96)))
*
* [http://www.math.cornell.edu/~mec/2008-2009/TianyiZheng/Birthday.html]
*
* Assuming that we want to ensure that p(n) never goes over 1 / 1 trillion
* we must not write more than 398,065,730 blocks with the same encryption key.
* Therefore, we rotate our keys after 400,000,000 blocks have been written by
* generating a new random 64 bit salt for our HKDF encryption key generation
* function.
*/
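/*
 * As an illustrative check of the bound above (a throwaway userland sketch,
 * not part of the driver code; the 2^96 IV space and the 398,065,730 figure
 * are taken from the comment, everything else is assumed for demonstration):
 *
 * #include <math.h>
 * #include <stdio.h>
 *
 * int
 * main(void)
 * {
 *	double iv_space = ldexp(1.0, 96);	// 2^96 possible IVs
 *	double n = 398065730.0;			// blocks written with one key
 *	double x = n * (n - 1.0) / (2.0 * iv_space);
 *
 *	// -expm1(-x) computes 1 - e^(-x) without cancellation for tiny x;
 *	// this prints roughly 1.000e-12, i.e. about 1 in a trillion.
 *	printf("p(n) ~= %.3e\n", -expm1(-x));
 *	return (0);
 * }
 */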
#define ZFS_KEY_MAX_SALT_USES_DEFAULT 400000000
#define ZFS_CURRENT_MAX_SALT_USES \
(MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT))
unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
/*
* Set to a nonzero value to cause zio_do_crypt_uio() to fail 1/this many
* calls, to test decryption error handling code paths.
*/
uint64_t zio_decrypt_fail_fraction = 0;
typedef struct blkptr_auth_buf {
uint64_t bab_prop; /* blk_prop - portable mask */
uint8_t bab_mac[ZIO_DATA_MAC_LEN]; /* MAC from blk_cksum */
uint64_t bab_pad; /* reserved for future use */
} blkptr_auth_buf_t;
zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
{"", ZC_TYPE_NONE, 0, "inherit"},
{"", ZC_TYPE_NONE, 0, "on"},
{"", ZC_TYPE_NONE, 0, "off"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 16, "aes-128-ccm"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 24, "aes-192-ccm"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 32, "aes-256-ccm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 16, "aes-128-gcm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 24, "aes-192-gcm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 32, "aes-256-gcm"}
};
static void
zio_crypt_key_destroy_early(zio_crypt_key_t *key)
{
rw_destroy(&key->zk_salt_lock);
/* free crypto templates */
bzero(&key->zk_session, sizeof (key->zk_session));
/* zero out sensitive data */
bzero(key, sizeof (zio_crypt_key_t));
}
void
zio_crypt_key_destroy(zio_crypt_key_t *key)
{
freebsd_crypt_freesession(&key->zk_session);
zio_crypt_key_destroy_early(key);
}
int
zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
{
int ret;
crypto_mechanism_t mech __unused;
uint_t keydata_len;
zio_crypt_info_t *ci = NULL;
ASSERT3P(key, !=, NULL);
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ci = &zio_crypt_table[crypt];
if (ci->ci_crypt_type != ZC_TYPE_GCM &&
ci->ci_crypt_type != ZC_TYPE_CCM)
return (ENOTSUP);
keydata_len = zio_crypt_table[crypt].ci_keylen;
bzero(key, sizeof (zio_crypt_key_t));
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
/* fill keydata buffers and salt with random data */
ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t));
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_master_keydata, keydata_len);
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
/* derive the current key from the master key */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
keydata_len);
if (ret != 0)
goto error;
/* initialize keys for the ICP */
key->zk_current_key.ck_format = CRYPTO_KEY_RAW;
key->zk_current_key.ck_data = key->zk_current_keydata;
key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW;
key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
ci = &zio_crypt_table[crypt];
if (ci->ci_crypt_type != ZC_TYPE_GCM &&
ci->ci_crypt_type != ZC_TYPE_CCM)
return (ENOTSUP);
ret = freebsd_crypt_newsession(&key->zk_session, ci,
&key->zk_current_key);
if (ret)
goto error;
key->zk_crypt = crypt;
key->zk_version = ZIO_CRYPT_KEY_CURRENT_VERSION;
key->zk_salt_count = 0;
return (0);
error:
zio_crypt_key_destroy_early(key);
return (ret);
}
static int
zio_crypt_key_change_salt(zio_crypt_key_t *key)
{
int ret = 0;
uint8_t salt[ZIO_DATA_SALT_LEN];
crypto_mechanism_t mech __unused;
uint_t keydata_len = zio_crypt_table[key->zk_crypt].ci_keylen;
/* generate a new salt */
ret = random_get_bytes(salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
rw_enter(&key->zk_salt_lock, RW_WRITER);
/* someone beat us to the salt rotation, just unlock and return */
if (key->zk_salt_count < ZFS_CURRENT_MAX_SALT_USES)
goto out_unlock;
/* derive the current key from the master key and the new salt */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, keydata_len);
if (ret != 0)
goto out_unlock;
/* assign the salt and reset the usage count */
bcopy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
key->zk_salt_count = 0;
freebsd_crypt_freesession(&key->zk_session);
ret = freebsd_crypt_newsession(&key->zk_session,
&zio_crypt_table[key->zk_crypt], &key->zk_current_key);
if (ret != 0)
goto out_unlock;
rw_exit(&key->zk_salt_lock);
return (0);
out_unlock:
rw_exit(&key->zk_salt_lock);
error:
return (ret);
}
/* See comment above zfs_key_max_salt_uses definition for details */
int
zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
{
int ret;
boolean_t salt_change;
rw_enter(&key->zk_salt_lock, RW_READER);
bcopy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
ZFS_CURRENT_MAX_SALT_USES);
rw_exit(&key->zk_salt_lock);
if (salt_change) {
ret = zio_crypt_key_change_salt(key);
if (ret != 0)
goto error;
}
return (0);
error:
return (ret);
}
void *failed_decrypt_buf;
int failed_decrypt_size;
/*
* This function handles all encryption and decryption in zfs. When
* encrypting it expects puio to reference the plaintext and cuio to
* reference the ciphertext. cuio must have enough space for the
* ciphertext + room for a MAC. datalen should be the length of the
* plaintext / ciphertext alone.
*/
/*
* The implementation for FreeBSD's OpenCrypto.
*
* The big difference between ICP and FOC is that FOC uses a single
* buffer for input and output. This means that (for AES-GCM, the
* only one supported right now) the source must be copied into the
* destination, and the destination must have the AAD, and the tag/MAC,
* already associated with it. (Both implementations can use a uio.)
*
* Since the auth data is part of the iovec array, all we need to know
* is the length: 0 means there's no AAD.
*
*/
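/*
 * Concretely, the uio handed to freebsd_crypt_uio() by the callers below is
 * laid out roughly as follows (a summary of those callers, not a separate
 * contract):
 *
 * iov[0]	AAD, present only when auth_len != 0
 * iov[...]	data transformed in place, enc_len bytes in total
 * iov[last]	MAC / tag (WRAPPING_MAC_LEN or ZIO_DATA_MAC_LEN)
 */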
static int
zio_do_crypt_uio_opencrypto(boolean_t encrypt, freebsd_crypt_session_t *sess,
uint64_t crypt, crypto_key_t *key, uint8_t *ivbuf, uint_t datalen,
zfs_uio_t *uio, uint_t auth_len)
{
zio_crypt_info_t *ci;
int ret;
ci = &zio_crypt_table[crypt];
if (ci->ci_crypt_type != ZC_TYPE_GCM &&
ci->ci_crypt_type != ZC_TYPE_CCM)
return (ENOTSUP);
ret = freebsd_crypt_uio(encrypt, sess, ci, uio, key, ivbuf,
datalen, auth_len);
if (ret != 0) {
#ifdef FCRYPTO_DEBUG
printf("%s(%d): Returning error %s\n",
__FUNCTION__, __LINE__, encrypt ? "EIO" : "ECKSUM");
#endif
ret = SET_ERROR(encrypt ? EIO : ECKSUM);
}
return (ret);
}
int
zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out)
{
int ret;
uint64_t aad[3];
/*
* With OpenCrypto in FreeBSD, the same buffer is used for
* input and output. Also, the AAD (for AES-GCM at least)
* needs to logically go in front.
*/
zfs_uio_t cuio;
struct uio cuio_s;
iovec_t iovecs[4];
uint64_t crypt = key->zk_crypt;
uint_t enc_len, keydata_len, aad_len;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW);
zfs_uio_init(&cuio, &cuio_s);
keydata_len = zio_crypt_table[crypt].ci_keylen;
/* generate iv for wrapping the master and hmac key */
ret = random_get_pseudo_bytes(iv, WRAPPING_IV_LEN);
if (ret != 0)
goto error;
/*
* Since we only support one buffer, we need to copy
* the plain text (source) to the cipher buffer (dest).
* We set iovecs[0] -- the authentication data -- below.
*/
bcopy((void*)key->zk_master_keydata, keydata_out, keydata_len);
bcopy((void*)key->zk_hmac_keydata, hmac_keydata_out,
SHA512_HMAC_KEYLEN);
iovecs[1].iov_base = keydata_out;
iovecs[1].iov_len = keydata_len;
iovecs[2].iov_base = hmac_keydata_out;
iovecs[2].iov_len = SHA512_HMAC_KEYLEN;
iovecs[3].iov_base = mac;
iovecs[3].iov_len = WRAPPING_MAC_LEN;
/*
* Although we don't support writing to the old format, we do
* support rewrapping the key so that the user can move and
* quarantine datasets on the old format.
*/
if (key->zk_version == 0) {
aad_len = sizeof (uint64_t);
aad[0] = LE_64(key->zk_guid);
} else {
ASSERT3U(key->zk_version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
aad_len = sizeof (uint64_t) * 3;
aad[0] = LE_64(key->zk_guid);
aad[1] = LE_64(crypt);
aad[2] = LE_64(key->zk_version);
}
iovecs[0].iov_base = aad;
iovecs[0].iov_len = aad_len;
enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN;
GET_UIO_STRUCT(&cuio)->uio_iov = iovecs;
zfs_uio_iovcnt(&cuio) = 4;
zfs_uio_segflg(&cuio) = UIO_SYSSPACE;
/* encrypt the keys and store the resulting ciphertext and mac */
ret = zio_do_crypt_uio_opencrypto(B_TRUE, NULL, crypt, cwkey,
iv, enc_len, &cuio, aad_len);
if (ret != 0)
goto error;
return (0);
error:
return (ret);
}
int
zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
uint64_t guid, uint8_t *keydata, uint8_t *hmac_keydata, uint8_t *iv,
uint8_t *mac, zio_crypt_key_t *key)
{
int ret;
uint64_t aad[3];
/*
* With OpenCrypto in FreeBSD, the same buffer is used for
* input and output. Also, the AAD (for AES-GCM at least)
* needs to logically go in front.
*/
zfs_uio_t cuio;
struct uio cuio_s;
iovec_t iovecs[4];
void *src, *dst;
uint_t enc_len, keydata_len, aad_len;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW);
keydata_len = zio_crypt_table[crypt].ci_keylen;
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
zfs_uio_init(&cuio, &cuio_s);
/*
* Since we only support one buffer, we need to copy
* the encrypted buffer (source) to the plain buffer
* (dest). We set iovecs[0] -- the authentication data --
* below.
*/
dst = key->zk_master_keydata;
src = keydata;
bcopy(src, dst, keydata_len);
dst = key->zk_hmac_keydata;
src = hmac_keydata;
bcopy(src, dst, SHA512_HMAC_KEYLEN);
iovecs[1].iov_base = key->zk_master_keydata;
iovecs[1].iov_len = keydata_len;
iovecs[2].iov_base = key->zk_hmac_keydata;
iovecs[2].iov_len = SHA512_HMAC_KEYLEN;
iovecs[3].iov_base = mac;
iovecs[3].iov_len = WRAPPING_MAC_LEN;
if (version == 0) {
aad_len = sizeof (uint64_t);
aad[0] = LE_64(guid);
} else {
ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
aad_len = sizeof (uint64_t) * 3;
aad[0] = LE_64(guid);
aad[1] = LE_64(crypt);
aad[2] = LE_64(version);
}
enc_len = keydata_len + SHA512_HMAC_KEYLEN;
iovecs[0].iov_base = aad;
iovecs[0].iov_len = aad_len;
GET_UIO_STRUCT(&cuio)->uio_iov = iovecs;
zfs_uio_iovcnt(&cuio) = 4;
zfs_uio_segflg(&cuio) = UIO_SYSSPACE;
/* decrypt the keys and store the result in the output buffers */
ret = zio_do_crypt_uio_opencrypto(B_FALSE, NULL, crypt, cwkey,
iv, enc_len, &cuio, aad_len);
if (ret != 0)
goto error;
/* generate a fresh salt */
ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
/* derive the current key from the master key */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
keydata_len);
if (ret != 0)
goto error;
/* initialize keys for ICP */
key->zk_current_key.ck_format = CRYPTO_KEY_RAW;
key->zk_current_key.ck_data = key->zk_current_keydata;
key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW;
key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
ret = freebsd_crypt_newsession(&key->zk_session,
&zio_crypt_table[crypt], &key->zk_current_key);
if (ret != 0)
goto error;
key->zk_crypt = crypt;
key->zk_version = version;
key->zk_guid = guid;
key->zk_salt_count = 0;
return (0);
error:
zio_crypt_key_destroy_early(key);
return (ret);
}
int
zio_crypt_generate_iv(uint8_t *ivbuf)
{
int ret;
/* randomly generate the IV */
ret = random_get_pseudo_bytes(ivbuf, ZIO_DATA_IV_LEN);
if (ret != 0)
goto error;
return (0);
error:
bzero(ivbuf, ZIO_DATA_IV_LEN);
return (ret);
}
int
zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
uint8_t *digestbuf, uint_t digestlen)
{
uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH];
ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH);
crypto_mac(&key->zk_hmac_key, data, datalen,
raw_digestbuf, SHA512_DIGEST_LENGTH);
bcopy(raw_digestbuf, digestbuf, digestlen);
return (0);
}
int
zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
uint_t datalen, uint8_t *ivbuf, uint8_t *salt)
{
int ret;
uint8_t digestbuf[SHA512_DIGEST_LENGTH];
ret = zio_crypt_do_hmac(key, data, datalen,
digestbuf, SHA512_DIGEST_LENGTH);
if (ret != 0)
return (ret);
bcopy(digestbuf, salt, ZIO_DATA_SALT_LEN);
bcopy(digestbuf + ZIO_DATA_SALT_LEN, ivbuf, ZIO_DATA_IV_LEN);
return (0);
}
/*
* The following functions are used to encode and decode encryption parameters
* into blkptr_t and zil_header_t. The ICP wants to use these parameters as
* byte strings, which normally means that these strings would not need to deal
* with byteswapping at all. However, both blkptr_t and zil_header_t may be
* byteswapped by lower layers and so we must "undo" that byteswap here upon
* decoding and encoding in a non-native byteorder. These functions require
* that the byteorder bit is correct before being called.
*/
void
zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
uint64_t val64;
uint32_t val32;
ASSERT(BP_IS_ENCRYPTED(bp));
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
bcopy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
BP_SET_IV2(bp, val32);
} else {
bcopy(salt, &val64, sizeof (uint64_t));
bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);
bcopy(iv, &val64, sizeof (uint64_t));
bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);
bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
BP_SET_IV2(bp, BSWAP_32(val32));
}
}
void
zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
uint64_t val64;
uint32_t val32;
ASSERT(BP_IS_PROTECTED(bp));
/* for convenience, so callers don't need to check */
if (BP_IS_AUTHENTICATED(bp)) {
bzero(salt, ZIO_DATA_SALT_LEN);
bzero(iv, ZIO_DATA_IV_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
bcopy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
val32 = (uint32_t)BP_GET_IV2(bp);
bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
} else {
val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
bcopy(&val64, salt, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
bcopy(&val64, iv, sizeof (uint64_t));
val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
}
}
void
zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
{
uint64_t val64;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
bcopy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
sizeof (uint64_t));
} else {
bcopy(mac, &val64, sizeof (uint64_t));
bp->blk_cksum.zc_word[2] = BSWAP_64(val64);
bcopy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
}
}
void
zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
{
uint64_t val64;
ASSERT(BP_USES_CRYPT(bp) || BP_IS_HOLE(bp));
/* for convenience, so callers don't need to check */
if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
bzero(mac, ZIO_DATA_MAC_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
bcopy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
} else {
val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
bcopy(&val64, mac, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
bcopy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
}
}
void
zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
{
zil_chain_t *zilc = data;
bcopy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
bcopy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
sizeof (uint64_t));
}
void
zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
{
/*
* The ZIL MAC is embedded in the block it protects, which will
* not have been byteswapped by the time this function has been called.
* As a result, we don't need to worry about byteswapping the MAC.
*/
const zil_chain_t *zilc = data;
bcopy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
bcopy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
}
/*
* This routine takes a block of dnodes (src_abd) and copies only the bonus
* buffers to the same offsets in the dst buffer. datalen should be the size
* of both the src_abd and the dst buffer (not just the length of the bonus
* buffers).
*/
void
zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
{
uint_t i, max_dnp = datalen >> DNODE_SHIFT;
uint8_t *src;
dnode_phys_t *dnp, *sdnp, *ddnp;
src = abd_borrow_buf_copy(src_abd, datalen);
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
dnp = &sdnp[i];
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]),
DN_MAX_BONUS_LEN(dnp));
}
}
abd_return_buf(src_abd, src, datalen);
}
/*
* This function decides which fields from blk_prop are included in
* the various on-disk MAC algorithms.
*/
static void
zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version)
{
int avoidlint = SPA_MINBLOCKSIZE;
/*
* Version 0 did not properly zero out all non-portable fields
* as it should have done. We maintain this code so that we can
* do read-only imports of pools on this version.
*/
if (version == 0) {
BP_SET_DEDUP(bp, 0);
BP_SET_CHECKSUM(bp, 0);
BP_SET_PSIZE(bp, avoidlint);
return;
}
ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
/*
* The hole_birth feature might set these fields even if this bp
* is a hole. We zero them out here to guarantee that raw sends
* will function with or without the feature.
*/
if (BP_IS_HOLE(bp)) {
bp->blk_prop = 0ULL;
return;
}
/*
* At L0 we want to verify these fields to ensure that data blocks
* can not be reinterpreted. For instance, we do not want an attacker
* to trick us into returning raw lz4 compressed data to the user
* by modifying the compression bits. At higher levels, we cannot
* enforce this policy since raw sends do not convey any information
* about indirect blocks, so these values might be different on the
* receive side. Fortunately, this does not open any new attack
* vectors, since any alterations that can be made to a higher level
* bp must still verify the correct order of the layer below it.
*/
if (BP_GET_LEVEL(bp) != 0) {
BP_SET_BYTEORDER(bp, 0);
BP_SET_COMPRESS(bp, 0);
/*
* psize cannot be set to zero or it will trigger
* asserts, but the value doesn't really matter as
* long as it is constant.
*/
BP_SET_PSIZE(bp, avoidlint);
}
BP_SET_DEDUP(bp, 0);
BP_SET_CHECKSUM(bp, 0);
}
static void
zio_crypt_bp_auth_init(uint64_t version, boolean_t should_bswap, blkptr_t *bp,
blkptr_auth_buf_t *bab, uint_t *bab_len)
{
blkptr_t tmpbp = *bp;
if (should_bswap)
byteswap_uint64_array(&tmpbp, sizeof (blkptr_t));
ASSERT(BP_USES_CRYPT(&tmpbp) || BP_IS_HOLE(&tmpbp));
ASSERT0(BP_IS_EMBEDDED(&tmpbp));
zio_crypt_decode_mac_bp(&tmpbp, bab->bab_mac);
/*
* We always MAC blk_prop in LE to ensure portability. This
* must be done after decoding the mac, since the endianness
* will get zero'd out here.
*/
zio_crypt_bp_zero_nonportable_blkprop(&tmpbp, version);
bab->bab_prop = LE_64(tmpbp.blk_prop);
bab->bab_pad = 0ULL;
/* version 0 did not include the padding */
*bab_len = sizeof (blkptr_auth_buf_t);
if (version == 0)
*bab_len -= sizeof (uint64_t);
}
static int
zio_crypt_bp_do_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
crypto_mac_update(ctx, &bab, bab_len);
return (0);
}
static void
zio_crypt_bp_do_indrect_checksum_updates(SHA2_CTX *ctx, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
SHA2Update(ctx, &bab, bab_len);
}
static void
zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
bcopy(&bab, *aadp, bab_len);
*aadp += bab_len;
*aad_len += bab_len;
}
static int
zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, dnode_phys_t *dnp)
{
int ret, i;
dnode_phys_t *adnp;
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
uint8_t tmp_dncore[offsetof(dnode_phys_t, dn_blkptr)];
/* authenticate the core dnode (masking out non-portable bits) */
bcopy(dnp, tmp_dncore, sizeof (tmp_dncore));
adnp = (dnode_phys_t *)tmp_dncore;
if (le_bswap) {
adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec);
adnp->dn_bonuslen = BSWAP_16(adnp->dn_bonuslen);
adnp->dn_maxblkid = BSWAP_64(adnp->dn_maxblkid);
adnp->dn_used = BSWAP_64(adnp->dn_used);
}
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
crypto_mac_update(ctx, adnp, sizeof (tmp_dncore));
for (i = 0; i < dnp->dn_nblkptr; i++) {
ret = zio_crypt_bp_do_hmac_updates(ctx, version,
should_bswap, &dnp->dn_blkptr[i]);
if (ret != 0)
goto error;
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
ret = zio_crypt_bp_do_hmac_updates(ctx, version,
should_bswap, DN_SPILL_BLKPTR(dnp));
if (ret != 0)
goto error;
}
return (0);
error:
return (ret);
}
/*
* objset_phys_t blocks introduce a number of exceptions to the normal
* authentication process. objset_phys_t's contain 2 separate HMACs for
* protecting the integrity of their data. The portable_mac protects the
* metadnode. This MAC can be sent with a raw send and protects against
* reordering of data within the metadnode. The local_mac protects the user
* accounting objects which are not sent from one system to another.
*
* In addition, objset blocks are the only blocks that can be modified and
* written to disk without the key loaded under certain circumstances. During
* zil_claim() we need to be able to update the zil_header_t to complete
* claiming log blocks and during raw receives we need to write out the
* portable_mac from the send file. Both of these actions are possible
* because these fields are not protected by either MAC so neither one will
* need to modify the MACs without the key. However, when the modified blocks
* are written out they will be byteswapped into the host machine's native
* endianness which will modify fields protected by the MAC. As a result, MAC
* calculation for objset blocks works slightly differently from other block
* types. Where other block types MAC the data in whatever endianness is
* written to disk, objset blocks always MAC little endian version of their
* values. In the code, should_bswap is the value from BP_SHOULD_BYTESWAP()
* and le_bswap indicates whether a byteswap is needed to get this block
* into little endian format.
*/
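/*
 * In short, the function below feeds the two MACs as follows (a summary of
 * the code beneath, not an additional format definition):
 *
 * portable MAC: os_type, the portable os_flags bits and the metadnode
 * local MAC: the non-portable os_flags bits plus the userused, groupused
 * and (when present) projectused dnodes; it is zeroed out entirely when no
 * accounting objects exist
 */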
/* ARGSUSED */
int
zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
boolean_t should_bswap, uint8_t *portable_mac, uint8_t *local_mac)
{
int ret;
struct hmac_ctx hash_ctx;
struct hmac_ctx *ctx = &hash_ctx;
objset_phys_t *osp = data;
uint64_t intval;
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
uint8_t raw_portable_mac[SHA512_DIGEST_LENGTH];
uint8_t raw_local_mac[SHA512_DIGEST_LENGTH];
/* calculate the portable MAC from the portable fields and metadnode */
crypto_mac_init(ctx, &key->zk_hmac_key);
/* add in the os_type */
intval = (le_bswap) ? osp->os_type : BSWAP_64(osp->os_type);
crypto_mac_update(ctx, &intval, sizeof (uint64_t));
/* add in the portable os_flags */
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
intval &= OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
- /* CONSTCOND */
if (!ZFS_HOST_BYTEORDER)
intval = BSWAP_64(intval);
crypto_mac_update(ctx, &intval, sizeof (uint64_t));
/* add in fields from the metadnode */
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_meta_dnode);
if (ret)
goto error;
crypto_mac_final(ctx, raw_portable_mac, SHA512_DIGEST_LENGTH);
bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
/*
* The local MAC protects the user, group and project accounting.
* If these objects are not present, the local MAC is zeroed out.
*/
if ((datalen >= OBJSET_PHYS_SIZE_V3 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE &&
osp->os_projectused_dnode.dn_type == DMU_OT_NONE) ||
(datalen >= OBJSET_PHYS_SIZE_V2 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
(datalen <= OBJSET_PHYS_SIZE_V1)) {
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
}
/* calculate the local MAC from the userused and groupused dnodes */
crypto_mac_init(ctx, &key->zk_hmac_key);
/* add in the non-portable os_flags */
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
intval &= ~OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
- /* CONSTCOND */
if (!ZFS_HOST_BYTEORDER)
intval = BSWAP_64(intval);
crypto_mac_update(ctx, &intval, sizeof (uint64_t));
/* XXX check dnode type ... */
/* add in fields from the user accounting dnodes */
if (osp->os_userused_dnode.dn_type != DMU_OT_NONE) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_userused_dnode);
if (ret)
goto error;
}
if (osp->os_groupused_dnode.dn_type != DMU_OT_NONE) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_groupused_dnode);
if (ret)
goto error;
}
if (osp->os_projectused_dnode.dn_type != DMU_OT_NONE &&
datalen >= OBJSET_PHYS_SIZE_V3) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_projectused_dnode);
if (ret)
goto error;
}
crypto_mac_final(ctx, raw_local_mac, SHA512_DIGEST_LENGTH);
bcopy(raw_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
error:
bzero(portable_mac, ZIO_OBJSET_MAC_LEN);
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (ret);
}
static void
zio_crypt_destroy_uio(zfs_uio_t *uio)
{
if (GET_UIO_STRUCT(uio)->uio_iov)
kmem_free(GET_UIO_STRUCT(uio)->uio_iov,
zfs_uio_iovcnt(uio) * sizeof (iovec_t));
}
/*
* This function parses an uncompressed indirect block and returns a checksum
* of all the portable fields from all of the contained bps. The portable
* fields are the MAC and all of the fields from blk_prop except for the dedup,
* checksum, and psize bits. For an explanation of the purpose of this, see
* the comment block on object set authentication.
*/
static int
zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
uint_t datalen, uint64_t version, boolean_t byteswap, uint8_t *cksum)
{
blkptr_t *bp;
int i, epb = datalen >> SPA_BLKPTRSHIFT;
SHA2_CTX ctx;
uint8_t digestbuf[SHA512_DIGEST_LENGTH];
/* checksum all of the MACs from the layer below */
SHA2Init(SHA512, &ctx);
for (i = 0, bp = buf; i < epb; i++, bp++) {
zio_crypt_bp_do_indrect_checksum_updates(&ctx, version,
byteswap, bp);
}
SHA2Final(digestbuf, &ctx);
if (generate) {
bcopy(digestbuf, cksum, ZIO_DATA_MAC_LEN);
return (0);
}
if (bcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0) {
#ifdef FCRYPTO_DEBUG
printf("%s(%d): Setting ECKSUM\n", __FUNCTION__, __LINE__);
#endif
return (SET_ERROR(ECKSUM));
}
return (0);
}
int
zio_crypt_do_indirect_mac_checksum(boolean_t generate, void *buf,
uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
int ret;
/*
* Unfortunately, callers of this function will not always have
* easy access to the on-disk format version. This info is
* normally found in the DSL Crypto Key, but the checksum-of-MACs
* is expected to be verifiable even when the key isn't loaded.
* Here, instead of doing a ZAP lookup for the version for each
* zio, we simply try both existing formats.
*/
ret = zio_crypt_do_indirect_mac_checksum_impl(generate, buf,
datalen, ZIO_CRYPT_KEY_CURRENT_VERSION, byteswap, cksum);
if (ret == ECKSUM) {
ASSERT(!generate);
ret = zio_crypt_do_indirect_mac_checksum_impl(generate,
buf, datalen, 0, byteswap, cksum);
}
return (ret);
}
int
zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd,
uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
int ret;
void *buf;
buf = abd_borrow_buf_copy(abd, datalen);
ret = zio_crypt_do_indirect_mac_checksum(generate, buf, datalen,
byteswap, cksum);
abd_return_buf(abd, buf, datalen);
return (ret);
}
/*
* Special case handling routine for encrypting / decrypting ZIL blocks.
* We do not check for the older ZIL chain because the encryption feature
* was not available before the newer ZIL chain was introduced. The goal
* here is to encrypt everything except the blkptr_t of a lr_write_t and
* the zil_chain_t header. Everything that is not encrypted is authenticated.
*/
/*
* The OpenCrypto used in FreeBSD does not use separate source and
* destination buffers; instead, the same buffer is used. Further, to
* accommodate some of the drivers, the authbuf needs to be logically before
* the data. This means that we need to copy the source to the destination,
* and set up an extra iovec_t at the beginning to handle the authbuf.
* It also means we'll only return one zfs_uio_t.
*/
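/*
 * Per log block, the split performed below works out to (derived from the
 * loop that follows, shown here only as a reading aid):
 *
 * zil_chain_t header: left in plaintext and authenticated, except for the
 * embedded zio_eck_t that will hold the MAC
 * each record's lr_t header: left in plaintext and authenticated
 * TX_WRITE trailing blkptr_t: left in plaintext and authenticated
 * all remaining record payload: encrypted
 */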
/* ARGSUSED */
static int
zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio,
zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len,
boolean_t *no_crypt)
{
uint8_t *aadbuf = zio_buf_alloc(datalen);
uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp;
iovec_t *dst_iovecs;
zil_chain_t *zilc;
lr_t *lr;
uint64_t txtype, lr_len;
uint_t crypt_len, nr_iovecs, vec;
uint_t aad_len = 0, total_len = 0;
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
} else {
src = cipherbuf;
dst = plainbuf;
}
bcopy(src, dst, datalen);
/* Find the start and end record of the log block. */
zilc = (zil_chain_t *)src;
slrp = src + sizeof (zil_chain_t);
aadp = aadbuf;
blkend = src + ((byteswap) ? BSWAP_64(zilc->zc_nused) : zilc->zc_nused);
/*
* Calculate the number of encrypted iovecs we will need.
*/
/* We need at least two iovecs -- one for the AAD, one for the MAC. */
nr_iovecs = 2;
for (; slrp < blkend; slrp += lr_len) {
lr = (lr_t *)slrp;
if (byteswap) {
txtype = BSWAP_64(lr->lrc_txtype);
lr_len = BSWAP_64(lr->lrc_reclen);
} else {
txtype = lr->lrc_txtype;
lr_len = lr->lrc_reclen;
}
nr_iovecs++;
if (txtype == TX_WRITE && lr_len != sizeof (lr_write_t))
nr_iovecs++;
}
dst_iovecs = kmem_alloc(nr_iovecs * sizeof (iovec_t), KM_SLEEP);
/*
* Copy the plain zil header over and authenticate everything except
* the checksum that will store our MAC. If we are writing the data
* the embedded checksum will not have been calculated yet, so we don't
* authenticate that.
*/
bcopy(src, aadp, sizeof (zil_chain_t) - sizeof (zio_eck_t));
aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);
slrp = src + sizeof (zil_chain_t);
dlrp = dst + sizeof (zil_chain_t);
/*
* Loop over records again, filling in iovecs.
*/
/* The first iovec will contain the authbuf. */
vec = 1;
for (; slrp < blkend; slrp += lr_len, dlrp += lr_len) {
lr = (lr_t *)slrp;
if (!byteswap) {
txtype = lr->lrc_txtype;
lr_len = lr->lrc_reclen;
} else {
txtype = BSWAP_64(lr->lrc_txtype);
lr_len = BSWAP_64(lr->lrc_reclen);
}
/* copy the common lr_t */
bcopy(slrp, dlrp, sizeof (lr_t));
bcopy(slrp, aadp, sizeof (lr_t));
aadp += sizeof (lr_t);
aad_len += sizeof (lr_t);
/*
* If this is a TX_WRITE record we want to encrypt everything
* except the bp, if it exists. If the bp does exist we want to
* authenticate it.
*/
if (txtype == TX_WRITE) {
crypt_len = sizeof (lr_write_t) -
sizeof (lr_t) - sizeof (blkptr_t);
dst_iovecs[vec].iov_base = (char *)dlrp +
sizeof (lr_t);
dst_iovecs[vec].iov_len = crypt_len;
/* copy the bp now since it will not be encrypted */
bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
aadp, sizeof (blkptr_t));
aadp += sizeof (blkptr_t);
aad_len += sizeof (blkptr_t);
vec++;
total_len += crypt_len;
if (lr_len != sizeof (lr_write_t)) {
crypt_len = lr_len - sizeof (lr_write_t);
dst_iovecs[vec].iov_base = (char *)
dlrp + sizeof (lr_write_t);
dst_iovecs[vec].iov_len = crypt_len;
vec++;
total_len += crypt_len;
}
} else {
crypt_len = lr_len - sizeof (lr_t);
dst_iovecs[vec].iov_base = (char *)dlrp +
sizeof (lr_t);
dst_iovecs[vec].iov_len = crypt_len;
vec++;
total_len += crypt_len;
}
}
/* The last iovec will contain the MAC. */
ASSERT3U(vec, ==, nr_iovecs - 1);
/* AAD */
dst_iovecs[0].iov_base = aadbuf;
dst_iovecs[0].iov_len = aad_len;
/* MAC */
dst_iovecs[vec].iov_base = 0;
dst_iovecs[vec].iov_len = 0;
*no_crypt = (vec == 1);
*enc_len = total_len;
*authbuf = aadbuf;
*auth_len = aad_len;
GET_UIO_STRUCT(out_uio)->uio_iov = dst_iovecs;
zfs_uio_iovcnt(out_uio) = nr_iovecs;
return (0);
}
/*
* Special case handling routine for encrypting / decrypting dnode blocks.
*/
static int
zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
zfs_uio_t *puio, zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf,
uint_t *auth_len, boolean_t *no_crypt)
{
uint8_t *aadbuf = zio_buf_alloc(datalen);
uint8_t *src, *dst, *aadp;
dnode_phys_t *dnp, *adnp, *sdnp, *ddnp;
iovec_t *dst_iovecs;
uint_t nr_iovecs, crypt_len, vec;
uint_t aad_len = 0, total_len = 0;
uint_t i, j, max_dnp = datalen >> DNODE_SHIFT;
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
} else {
src = cipherbuf;
dst = plainbuf;
}
bcopy(src, dst, datalen);
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
aadp = aadbuf;
/*
* Count the number of iovecs we will need to do the encryption by
* counting the number of bonus buffers that need to be encrypted.
*/
/* We need at least two iovecs -- one for the AAD, one for the MAC. */
nr_iovecs = 2;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
/*
* This block may still be byteswapped. However, all of the
* values we use are either uint8_t's (for which byteswapping
* is a noop) or only used in a != 0 check, which will work regardless
* of whether or not we byteswap.
*/
if (sdnp[i].dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(sdnp[i].dn_bonustype) &&
sdnp[i].dn_bonuslen != 0) {
nr_iovecs++;
}
}
dst_iovecs = kmem_alloc(nr_iovecs * sizeof (iovec_t), KM_SLEEP);
/*
* Iterate through the dnodes again, this time filling in the uios
* we allocated earlier. We also concatenate any data we want to
* authenticate onto aadbuf.
*/
/* The first iovec will contain the authbuf. */
vec = 1;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
dnp = &sdnp[i];
/* copy over the core fields and blkptrs (kept as plaintext) */
bcopy(dnp, &ddnp[i], (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
bcopy(DN_SPILL_BLKPTR(dnp), DN_SPILL_BLKPTR(&ddnp[i]),
sizeof (blkptr_t));
}
/*
* Handle authenticated data. We authenticate everything in
* the dnode that can be brought over when we do a raw send.
* This includes all of the core fields as well as the MACs
* stored in the bp checksums and all of the portable bits
* from blk_prop. We include the dnode padding here in case it
* ever gets used in the future. Some dn_flags and dn_used are
* not portable, so we mask those values out of the
* authenticated data.
*/
crypt_len = offsetof(dnode_phys_t, dn_blkptr);
bcopy(dnp, aadp, crypt_len);
adnp = (dnode_phys_t *)aadp;
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
aadp += crypt_len;
aad_len += crypt_len;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
version, byteswap, &dnp->dn_blkptr[j]);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
version, byteswap, DN_SPILL_BLKPTR(dnp));
}
/*
* If this bonus buffer needs to be encrypted, we prepare an
* iovec_t. The encryption / decryption functions will fill
* this in for us with the encrypted or decrypted data.
* Otherwise we add the bonus buffer to the authenticated
* data buffer and copy it over to the destination. The
* encrypted iovec extends to DN_MAX_BONUS_LEN(dnp) so that
* we can guarantee alignment with the AES block size
* (128 bits).
*/
crypt_len = DN_MAX_BONUS_LEN(dnp);
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
dst_iovecs[vec].iov_base = DN_BONUS(&ddnp[i]);
dst_iovecs[vec].iov_len = crypt_len;
vec++;
total_len += crypt_len;
} else {
bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]), crypt_len);
bcopy(DN_BONUS(dnp), aadp, crypt_len);
aadp += crypt_len;
aad_len += crypt_len;
}
}
/* The last iovec will contain the MAC. */
ASSERT3U(vec, ==, nr_iovecs - 1);
/* AAD */
dst_iovecs[0].iov_base = aadbuf;
dst_iovecs[0].iov_len = aad_len;
/* MAC */
dst_iovecs[vec].iov_base = 0;
dst_iovecs[vec].iov_len = 0;
*no_crypt = (vec == 1);
*enc_len = total_len;
*authbuf = aadbuf;
*auth_len = aad_len;
GET_UIO_STRUCT(out_uio)->uio_iov = dst_iovecs;
zfs_uio_iovcnt(out_uio) = nr_iovecs;
return (0);
}
/* ARGSUSED */
static int
zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *out_uio,
uint_t *enc_len)
{
int ret;
uint_t nr_plain = 1, nr_cipher = 2;
iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL;
void *src, *dst;
cipher_iovecs = kmem_alloc(nr_cipher * sizeof (iovec_t),
KM_SLEEP);
if (!cipher_iovecs) {
ret = SET_ERROR(ENOMEM);
goto error;
}
bzero(cipher_iovecs, nr_cipher * sizeof (iovec_t));
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
} else {
src = cipherbuf;
dst = plainbuf;
}
bcopy(src, dst, datalen);
cipher_iovecs[0].iov_base = dst;
cipher_iovecs[0].iov_len = datalen;
*enc_len = datalen;
GET_UIO_STRUCT(out_uio)->uio_iov = cipher_iovecs;
zfs_uio_iovcnt(out_uio) = nr_cipher;
return (0);
error:
if (plain_iovecs != NULL)
kmem_free(plain_iovecs, nr_plain * sizeof (iovec_t));
if (cipher_iovecs != NULL)
kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t));
*enc_len = 0;
GET_UIO_STRUCT(out_uio)->uio_iov = NULL;
zfs_uio_iovcnt(out_uio) = 0;
return (ret);
}
/*
* This function builds up the plaintext (puio) and ciphertext (cuio) uios so
* that they can be used for encryption and decryption by zio_do_crypt_uio().
* Most blocks will use zio_crypt_init_uios_normal(), with ZIL and dnode blocks
* requiring special handling to parse out pieces that are to be encrypted. The
* authbuf is used by these special cases to store additional authenticated
* data (AAD) for the encryption modes.
*/
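/*
* Illustrative note (added for clarity, not part of the original code): for
* the dnode handler above, the resulting cuio ends up laid out roughly as
*
*   uio_iov[0]                 -> AAD buffer (authbuf)
*   uio_iov[1 .. iovcnt - 2]   -> bonus buffers to be encrypted / decrypted
*   uio_iov[iovcnt - 1]        -> MAC (ZIO_DATA_MAC_LEN bytes)
*
* zio_crypt_init_uios() itself only fills in the final MAC iovec below; the
* per-type handlers populate the AAD and data iovecs.
*/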
static int
zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len,
uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt)
{
int ret;
iovec_t *mac_iov;
ASSERT(DMU_OT_IS_ENCRYPTED(ot) || ot == DMU_OT_NONE);
/* route to handler */
switch (ot) {
case DMU_OT_INTENT_LOG:
ret = zio_crypt_init_uios_zil(encrypt, plainbuf, cipherbuf,
datalen, byteswap, puio, cuio, enc_len, authbuf, auth_len,
no_crypt);
break;
case DMU_OT_DNODE:
ret = zio_crypt_init_uios_dnode(encrypt, version, plainbuf,
cipherbuf, datalen, byteswap, puio, cuio, enc_len, authbuf,
auth_len, no_crypt);
break;
default:
ret = zio_crypt_init_uios_normal(encrypt, plainbuf, cipherbuf,
datalen, puio, cuio, enc_len);
*authbuf = NULL;
*auth_len = 0;
*no_crypt = B_FALSE;
break;
}
if (ret != 0)
goto error;
/* populate the uios */
zfs_uio_segflg(cuio) = UIO_SYSSPACE;
mac_iov =
((iovec_t *)&(GET_UIO_STRUCT(cuio)->
uio_iov[zfs_uio_iovcnt(cuio) - 1]));
mac_iov->iov_base = (void *)mac;
mac_iov->iov_len = ZIO_DATA_MAC_LEN;
return (0);
error:
return (ret);
}
void *failed_decrypt_buf;
int failed_decrypt_size;
/*
* Primary encryption / decryption entrypoint for zio data.
*/
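/*
* Overview (descriptive note added for clarity): the uios are built by
* zio_crypt_init_uios(), the key is either the cached zk_current_key (when
* the supplied salt matches zk_salt) or a temporary key derived with
* hkdf_sha512() from the master key and salt, and the transform itself is
* performed by zio_do_crypt_uio_opencrypto().
*/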
int
zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
dmu_object_type_t ot, boolean_t byteswap, uint8_t *salt, uint8_t *iv,
uint8_t *mac, uint_t datalen, uint8_t *plainbuf, uint8_t *cipherbuf,
boolean_t *no_crypt)
{
int ret;
boolean_t locked = B_FALSE;
uint64_t crypt = key->zk_crypt;
uint_t keydata_len = zio_crypt_table[crypt].ci_keylen;
uint_t enc_len, auth_len;
zfs_uio_t puio, cuio;
struct uio puio_s, cuio_s;
uint8_t enc_keydata[MASTER_KEY_MAX_LEN];
crypto_key_t tmp_ckey, *ckey = NULL;
freebsd_crypt_session_t *tmpl = NULL;
uint8_t *authbuf = NULL;
zfs_uio_init(&puio, &puio_s);
zfs_uio_init(&cuio, &cuio_s);
bzero(GET_UIO_STRUCT(&puio), sizeof (struct uio));
bzero(GET_UIO_STRUCT(&cuio), sizeof (struct uio));
#ifdef FCRYPTO_DEBUG
printf("%s(%s, %p, %p, %d, %p, %p, %u, %s, %p, %p, %p)\n",
__FUNCTION__,
encrypt ? "encrypt" : "decrypt",
key, salt, ot, iv, mac, datalen,
byteswap ? "byteswap" : "native_endian", plainbuf,
cipherbuf, no_crypt);
printf("\tkey = {");
for (int i = 0; i < key->zk_current_key.ck_length/8; i++)
printf("%02x ", ((uint8_t *)key->zk_current_key.ck_data)[i]);
printf("}\n");
#endif
/* create uios for encryption */
ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf,
cipherbuf, datalen, byteswap, mac, &puio, &cuio, &enc_len,
&authbuf, &auth_len, no_crypt);
if (ret != 0)
return (ret);
/*
* If the needed key is the current one, just use it. Otherwise we
* need to generate a temporary one from the given salt + master key.
* If we are encrypting, we must return a copy of the current salt
* so that it can be stored in the blkptr_t.
*/
rw_enter(&key->zk_salt_lock, RW_READER);
locked = B_TRUE;
if (bcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
ckey = &key->zk_current_key;
tmpl = &key->zk_session;
} else {
rw_exit(&key->zk_salt_lock);
locked = B_FALSE;
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
salt, ZIO_DATA_SALT_LEN, enc_keydata, keydata_len);
if (ret != 0)
goto error;
tmp_ckey.ck_format = CRYPTO_KEY_RAW;
tmp_ckey.ck_data = enc_keydata;
tmp_ckey.ck_length = CRYPTO_BYTES2BITS(keydata_len);
ckey = &tmp_ckey;
tmpl = NULL;
}
/* perform the encryption / decryption */
ret = zio_do_crypt_uio_opencrypto(encrypt, tmpl, key->zk_crypt,
ckey, iv, enc_len, &cuio, auth_len);
if (ret != 0)
goto error;
if (locked) {
rw_exit(&key->zk_salt_lock);
locked = B_FALSE;
}
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
bzero(enc_keydata, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (0);
error:
if (!encrypt) {
if (failed_decrypt_buf != NULL)
kmem_free(failed_decrypt_buf, failed_decrypt_size);
failed_decrypt_buf = kmem_alloc(datalen, KM_SLEEP);
failed_decrypt_size = datalen;
bcopy(cipherbuf, failed_decrypt_buf, datalen);
}
if (locked)
rw_exit(&key->zk_salt_lock);
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
bzero(enc_keydata, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (SET_ERROR(ret));
}
/*
* Simple wrapper around zio_do_crypt_data() to work with abd's instead of
* linear buffers.
*/
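/*
* Note (added for clarity): the side acting as the source is borrowed with
* abd_borrow_buf_copy() so its contents are copied into a linear buffer,
* while the destination side is borrowed with abd_borrow_buf() and the
* result is written back with abd_return_buf_copy() on success.
*/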
int
zio_do_crypt_abd(boolean_t encrypt, zio_crypt_key_t *key, dmu_object_type_t ot,
boolean_t byteswap, uint8_t *salt, uint8_t *iv, uint8_t *mac,
uint_t datalen, abd_t *pabd, abd_t *cabd, boolean_t *no_crypt)
{
int ret;
void *ptmp, *ctmp;
if (encrypt) {
ptmp = abd_borrow_buf_copy(pabd, datalen);
ctmp = abd_borrow_buf(cabd, datalen);
} else {
ptmp = abd_borrow_buf(pabd, datalen);
ctmp = abd_borrow_buf_copy(cabd, datalen);
}
ret = zio_do_crypt_data(encrypt, key, ot, byteswap, salt, iv, mac,
datalen, ptmp, ctmp, no_crypt);
if (ret != 0)
goto error;
if (encrypt) {
abd_return_buf(pabd, ptmp, datalen);
abd_return_buf_copy(cabd, ctmp, datalen);
} else {
abd_return_buf_copy(pabd, ptmp, datalen);
abd_return_buf(cabd, ctmp, datalen);
}
return (0);
error:
if (encrypt) {
abd_return_buf(pabd, ptmp, datalen);
abd_return_buf_copy(cabd, ctmp, datalen);
} else {
abd_return_buf_copy(pabd, ptmp, datalen);
abd_return_buf(cabd, ctmp, datalen);
}
return (SET_ERROR(ret));
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* BEGIN CSTYLED */
module_param(zfs_key_max_salt_uses, ulong, 0644);
MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value "
"can be used for generating encryption keys before it is rotated");
/* END CSTYLED */
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
index d1d238a4e303..a8f1ea7ca3de 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
@@ -1,1147 +1,1147 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2019 by Delphix. All rights reserved.
*/
/*
* See abd.c for a general overview of the arc buffered data (ABD).
*
* Linear buffers act exactly like normal buffers and are always mapped into the
* kernel's virtual memory space, while scattered ABD data chunks are allocated
* as physical pages and then mapped in only while they are actually being
* accessed through one of the abd_* library functions. Using scattered ABDs
* provides several benefits:
*
* (1) They avoid use of kmem_*, preventing performance problems where running
* kmem_reap on very large memory systems never finishes and causes
* constant TLB shootdowns.
*
* (2) Fragmentation is less of an issue since when we are at the limit of
* allocatable space, we won't have to search around for a long free
* hole in the VA space for large ARC allocations. Each chunk is mapped in
* individually, so even if we are using HIGHMEM (see next point) we
* wouldn't need to worry about finding a contiguous address range.
*
* (3) If we are not using HIGHMEM, then all physical memory is always
* mapped into the kernel's address space, so we also avoid the map /
* unmap costs on each ABD access.
*
* If we are not using HIGHMEM, scattered buffers which have only one chunk
* can be treated as linear buffers, because they are contiguous in the
* kernel's virtual address space. See abd_alloc_chunks() for details.
*/
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/kmap_compat.h>
#include <linux/scatterlist.h>
#else
#define MAX_ORDER 1
#endif
typedef struct abd_stats {
kstat_named_t abdstat_struct_size;
kstat_named_t abdstat_linear_cnt;
kstat_named_t abdstat_linear_data_size;
kstat_named_t abdstat_scatter_cnt;
kstat_named_t abdstat_scatter_data_size;
kstat_named_t abdstat_scatter_chunk_waste;
kstat_named_t abdstat_scatter_orders[MAX_ORDER];
kstat_named_t abdstat_scatter_page_multi_chunk;
kstat_named_t abdstat_scatter_page_multi_zone;
kstat_named_t abdstat_scatter_page_alloc_retry;
kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;
static abd_stats_t abd_stats = {
/* Amount of memory occupied by all of the abd_t struct allocations */
{ "struct_size", KSTAT_DATA_UINT64 },
/*
* The number of linear ABDs which are currently allocated, excluding
* ABDs which don't own their data (for instance the ones which were
* allocated through abd_get_offset() and abd_get_from_buf()). If an
* ABD takes ownership of its buf then it will become tracked.
*/
{ "linear_cnt", KSTAT_DATA_UINT64 },
/* Amount of data stored in all linear ABDs tracked by linear_cnt */
{ "linear_data_size", KSTAT_DATA_UINT64 },
/*
* The number of scatter ABDs which are currently allocated, excluding
* ABDs which don't own their data (for instance the ones which were
* allocated through abd_get_offset()).
*/
{ "scatter_cnt", KSTAT_DATA_UINT64 },
/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
{ "scatter_data_size", KSTAT_DATA_UINT64 },
/*
* The amount of space wasted at the end of the last chunk across all
* scatter ABDs tracked by scatter_cnt.
*/
{ "scatter_chunk_waste", KSTAT_DATA_UINT64 },
/*
* The number of compound allocations of a given order. These
* allocations are spread over all currently allocated ABDs, and
* act as a measure of memory fragmentation.
*/
{ { "scatter_order_N", KSTAT_DATA_UINT64 } },
/*
* The number of scatter ABDs which contain multiple chunks.
* ABDs are preferentially allocated from the minimum number of
* contiguous multi-page chunks; a single chunk is optimal.
*/
{ "scatter_page_multi_chunk", KSTAT_DATA_UINT64 },
/*
* The number of scatter ABDs which are split across memory zones.
* ABDs are preferentially allocated using pages from a single zone.
*/
{ "scatter_page_multi_zone", KSTAT_DATA_UINT64 },
/*
* The total number of retries encountered when attempting to
* allocate the pages to populate the scatter ABD.
*/
{ "scatter_page_alloc_retry", KSTAT_DATA_UINT64 },
/*
* The total number of retries encountered when attempting to
* allocate the sg table for an ABD.
*/
{ "scatter_sg_table_retry", KSTAT_DATA_UINT64 },
};
struct {
wmsum_t abdstat_struct_size;
wmsum_t abdstat_linear_cnt;
wmsum_t abdstat_linear_data_size;
wmsum_t abdstat_scatter_cnt;
wmsum_t abdstat_scatter_data_size;
wmsum_t abdstat_scatter_chunk_waste;
wmsum_t abdstat_scatter_orders[MAX_ORDER];
wmsum_t abdstat_scatter_page_multi_chunk;
wmsum_t abdstat_scatter_page_multi_zone;
wmsum_t abdstat_scatter_page_alloc_retry;
wmsum_t abdstat_scatter_sg_table_retry;
} abd_sums;
#define abd_for_each_sg(abd, sg, n, i) \
for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
/*
* zfs_abd_scatter_min_size is the minimum allocation size to use scatter
* ABD's. Smaller allocations will use linear ABD's, which use
* zio_[data_]buf_alloc().
*
* Scatter ABD's use at least one page each, so sub-page allocations waste
* some space when allocated as scatter (e.g. 2KB scatter allocation wastes
* half of each page). Using linear ABD's for small allocations means that
* they will be put on slabs which contain many allocations. This can
* improve memory efficiency, but it also makes it much harder for ARC
* evictions to actually free pages, because all the buffers on one slab need
* to be freed in order for the slab (and underlying pages) to be freed.
* Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
* possible for them to actually waste more memory than scatter (one page per
* buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
*
* Spill blocks are typically 512B and are heavily used on systems running
* selinux with the default dnode size and the `xattr=sa` property set.
*
* By default we use linear allocations for 512B and 1KB, and scatter
* allocations for larger (1.5KB and up).
*/
int zfs_abd_scatter_min_size = 512 * 3;
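/*
* Example (illustrative): with the default threshold of 1536 bytes, 512B
* and 1KB allocations are satisfied by linear (kmem-backed) ABDs, while
* 1.5KB and larger allocations are satisfied by page-backed scatter ABDs.
* See abd_size_alloc_linear() below.
*/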
/*
* We use a scattered SPA_MAXBLOCKSIZE-sized ABD in which every entry of
* the scatterlist points at the same single zero'd page, allowing us to
* conserve memory.
*/
abd_t *abd_zero_scatter = NULL;
struct page;
/*
* abd_zero_page will be allocated as a zero'd PAGESIZE buffer, which is
* used to populate each of the pages of abd_zero_scatter.
*/
static struct page *abd_zero_page = NULL;
static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}
abd_t *
abd_alloc_struct_impl(size_t size)
{
/*
* In Linux we do not use the size passed in during ABD
* allocation, so we just ignore it.
*/
abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
ASSERT3P(abd, !=, NULL);
ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));
return (abd);
}
void
abd_free_struct_impl(abd_t *abd)
{
kmem_cache_free(abd_cache, abd);
ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}
#ifdef _KERNEL
/*
* Mark zfs data pages so they can be excluded from kernel crash dumps
*/
#ifdef _LP64
#define ABD_FILE_CACHE_PAGE 0x2F5ABDF11ECAC4E
static inline void
abd_mark_zfs_page(struct page *page)
{
get_page(page);
SetPagePrivate(page);
set_page_private(page, ABD_FILE_CACHE_PAGE);
}
static inline void
abd_unmark_zfs_page(struct page *page)
{
set_page_private(page, 0UL);
ClearPagePrivate(page);
put_page(page);
}
#else
#define abd_mark_zfs_page(page)
#define abd_unmark_zfs_page(page)
#endif /* _LP64 */
#ifndef CONFIG_HIGHMEM
#ifndef __GFP_RECLAIM
#define __GFP_RECLAIM __GFP_WAIT
#endif
/*
* The goal is to minimize fragmentation by preferentially populating ABDs
* with higher order compound pages from a single zone. Allocation size is
* progressively decreased until it can be satisfied without performing
* reclaim or compaction. When necessary this function will degenerate to
* allocating individual pages and allowing reclaim to satisfy allocations.
*/
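/*
* Worked example (illustrative, assuming 4KB pages): a 128KB ABD needs 32
* pages, so the first pass requests a single order-5 compound page
* (highbit64(32) - 1 == 5, subject to the max_order cap below). If that
* allocation fails, the order is reduced and the remainder is satisfied
* with smaller chunks, degenerating to order-0 pages in the worst case.
*/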
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
struct list_head pages;
struct sg_table table;
struct scatterlist *sg;
struct page *page, *tmp_page = NULL;
gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
int nr_pages = abd_chunkcnt_for_bytes(size);
int chunks = 0, zones = 0;
size_t remaining_size;
int nid = NUMA_NO_NODE;
int alloc_pages = 0;
INIT_LIST_HEAD(&pages);
while (alloc_pages < nr_pages) {
unsigned chunk_pages;
int order;
order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
chunk_pages = (1U << order);
page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
if (page == NULL) {
if (order == 0) {
ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
schedule_timeout_interruptible(1);
} else {
max_order = MAX(0, order - 1);
}
continue;
}
list_add_tail(&page->lru, &pages);
if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
zones++;
nid = page_to_nid(page);
ABDSTAT_BUMP(abdstat_scatter_orders[order]);
chunks++;
alloc_pages += chunk_pages;
}
ASSERT3S(alloc_pages, ==, nr_pages);
while (sg_alloc_table(&table, chunks, gfp)) {
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
schedule_timeout_interruptible(1);
}
sg = table.sgl;
remaining_size = size;
list_for_each_entry_safe(page, tmp_page, &pages, lru) {
size_t sg_size = MIN(PAGESIZE << compound_order(page),
remaining_size);
sg_set_page(sg, page, sg_size, 0);
abd_mark_zfs_page(page);
remaining_size -= sg_size;
sg = sg_next(sg);
list_del(&page->lru);
}
/*
* These conditions ensure that a possible transformation to a linear
* ABD would be valid.
*/
ASSERT(!PageHighMem(sg_page(table.sgl)));
ASSERT0(ABD_SCATTER(abd).abd_offset);
if (table.nents == 1) {
/*
* Since there is only one entry, this ABD can be represented
* as a linear buffer. All single-page (4K) ABD's can be
* represented this way. Some multi-page ABD's can also be
* represented this way, if we were able to allocate a single
* "chunk" (higher-order "page" which represents a power-of-2
* series of physically-contiguous pages). This is often the
* case for 2-page (8K) ABD's.
*
* Representing a single-entry scatter ABD as a linear ABD
* has the performance advantage of avoiding the copy (and
* allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
* A performance increase of around 5% has been observed for
* ARC-cached reads (of small blocks which can take advantage
* of this).
*
* Note that this optimization is only possible because the
* pages are always mapped into the kernel's address space.
* This is not the case for highmem pages, so the
* optimization can not be made there.
*/
abd->abd_flags |= ABD_FLAG_LINEAR;
abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
abd->abd_u.abd_linear.abd_sgl = table.sgl;
ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
} else if (table.nents > 1) {
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
if (zones) {
ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
}
ABD_SCATTER(abd).abd_sgl = table.sgl;
ABD_SCATTER(abd).abd_nents = table.nents;
}
}
#else
/*
* Allocate N individual pages to construct a scatter ABD. This function
* makes no attempt to request contiguous pages and requires the minimal
* number of kernel interfaces. It's designed for maximum compatibility.
*/
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
struct scatterlist *sg = NULL;
struct sg_table table;
struct page *page;
gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
int nr_pages = abd_chunkcnt_for_bytes(size);
int i = 0;
while (sg_alloc_table(&table, nr_pages, gfp)) {
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
schedule_timeout_interruptible(1);
}
ASSERT3U(table.nents, ==, nr_pages);
ABD_SCATTER(abd).abd_sgl = table.sgl;
ABD_SCATTER(abd).abd_nents = nr_pages;
abd_for_each_sg(abd, sg, nr_pages, i) {
while ((page = __page_cache_alloc(gfp)) == NULL) {
ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
schedule_timeout_interruptible(1);
}
ABDSTAT_BUMP(abdstat_scatter_orders[0]);
sg_set_page(sg, page, PAGESIZE, 0);
abd_mark_zfs_page(page);
}
if (nr_pages > 1) {
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
}
}
#endif /* !CONFIG_HIGHMEM */
/*
* This must be called if any of the sg_table allocation functions
* are called.
*/
static void
abd_free_sg_table(abd_t *abd)
{
struct sg_table table;
table.sgl = ABD_SCATTER(abd).abd_sgl;
table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
sg_free_table(&table);
}
void
abd_free_chunks(abd_t *abd)
{
struct scatterlist *sg = NULL;
struct page *page;
int nr_pages = ABD_SCATTER(abd).abd_nents;
int order, i = 0;
if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);
if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
abd_for_each_sg(abd, sg, nr_pages, i) {
page = sg_page(sg);
abd_unmark_zfs_page(page);
order = compound_order(page);
__free_pages(page, order);
ASSERT3U(sg->length, <=, PAGE_SIZE << order);
ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
}
abd_free_sg_table(abd);
}
/*
* Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
* the scatterlist will be set to the zero'd out buffer abd_zero_page.
*/
static void
abd_alloc_zero_scatter(void)
{
struct scatterlist *sg = NULL;
struct sg_table table;
gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
gfp_t gfp_zero_page = gfp | __GFP_ZERO;
int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
int i = 0;
while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
schedule_timeout_interruptible(1);
}
abd_mark_zfs_page(abd_zero_page);
while (sg_alloc_table(&table, nr_pages, gfp)) {
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
schedule_timeout_interruptible(1);
}
ASSERT3U(table.nents, ==, nr_pages);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
}
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}
#else /* _KERNEL */
#ifndef PAGE_SHIFT
#define PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif
#define zfs_kmap_atomic(chunk) ((void *)chunk)
#define zfs_kunmap_atomic(addr) do { (void)(addr); } while (0)
#define local_irq_save(flags) do { (void)(flags); } while (0)
#define local_irq_restore(flags) do { (void)(flags); } while (0)
#define nth_page(pg, i) \
((struct page *)((void *)(pg) + (i) * PAGESIZE))
struct scatterlist {
struct page *page;
int length;
int end;
};
static void
sg_init_table(struct scatterlist *sg, int nr)
{
memset(sg, 0, nr * sizeof (struct scatterlist));
sg[nr - 1].end = 1;
}
/*
* This must be called if any of the sg_table allocation functions
* are called.
*/
static void
abd_free_sg_table(abd_t *abd)
{
int nents = ABD_SCATTER(abd).abd_nents;
vmem_free(ABD_SCATTER(abd).abd_sgl,
nents * sizeof (struct scatterlist));
}
#define for_each_sg(sgl, sg, nr, i) \
for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))
static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
unsigned int offset)
{
/* currently we don't use offset */
ASSERT(offset == 0);
sg->page = page;
sg->length = len;
}
static inline struct page *
sg_page(struct scatterlist *sg)
{
return (sg->page);
}
static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
if (sg->end)
return (NULL);
return (sg + 1);
}
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
unsigned nr_pages = abd_chunkcnt_for_bytes(size);
struct scatterlist *sg;
int i;
ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
sizeof (struct scatterlist), KM_SLEEP);
sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);
abd_for_each_sg(abd, sg, nr_pages, i) {
struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
sg_set_page(sg, p, PAGESIZE, 0);
}
ABD_SCATTER(abd).abd_nents = nr_pages;
}
void
abd_free_chunks(abd_t *abd)
{
int i, n = ABD_SCATTER(abd).abd_nents;
struct scatterlist *sg;
abd_for_each_sg(abd, sg, n, i) {
for (int j = 0; j < sg->length; j += PAGESIZE) {
struct page *p = nth_page(sg_page(sg), j >> PAGE_SHIFT);
umem_free(p, PAGESIZE);
}
}
abd_free_sg_table(abd);
}
static void
abd_alloc_zero_scatter(void)
{
unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
struct scatterlist *sg;
int i;
abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
memset(abd_zero_page, 0, PAGESIZE);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
zfs_refcount_create(&abd_zero_scatter->abd_children);
ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
sizeof (struct scatterlist), KM_SLEEP);
sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);
abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
}
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}
#endif /* _KERNEL */
boolean_t
abd_size_alloc_linear(size_t size)
{
- return (size < zfs_abd_scatter_min_size ? B_TRUE : B_FALSE);
+ return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
if (op == ABDSTAT_INCR) {
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
} else {
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
}
}
void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
if (op == ABDSTAT_INCR) {
ABDSTAT_BUMP(abdstat_linear_cnt);
ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
} else {
ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
}
}
void
abd_verify_scatter(abd_t *abd)
{
size_t n;
int i = 0;
struct scatterlist *sg = NULL;
ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
ABD_SCATTER(abd).abd_sgl->length);
n = ABD_SCATTER(abd).abd_nents;
abd_for_each_sg(abd, sg, n, i) {
ASSERT3P(sg_page(sg), !=, NULL);
}
}
static void
abd_free_zero_scatter(void)
{
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
abd_free_sg_table(abd_zero_scatter);
abd_free_struct(abd_zero_scatter);
abd_zero_scatter = NULL;
ASSERT3P(abd_zero_page, !=, NULL);
#if defined(_KERNEL)
abd_unmark_zfs_page(abd_zero_page);
__free_page(abd_zero_page);
#else
umem_free(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}
static int
abd_kstats_update(kstat_t *ksp, int rw)
{
abd_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
as->abdstat_struct_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_struct_size);
as->abdstat_linear_cnt.value.ui64 =
wmsum_value(&abd_sums.abdstat_linear_cnt);
as->abdstat_linear_data_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_linear_data_size);
as->abdstat_scatter_cnt.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_cnt);
as->abdstat_scatter_data_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_data_size);
as->abdstat_scatter_chunk_waste.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
for (int i = 0; i < MAX_ORDER; i++) {
as->abdstat_scatter_orders[i].value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
}
as->abdstat_scatter_page_multi_chunk.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
as->abdstat_scatter_page_multi_zone.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
as->abdstat_scatter_page_alloc_retry.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
as->abdstat_scatter_sg_table_retry.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
return (0);
}
void
abd_init(void)
{
int i;
abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
wmsum_init(&abd_sums.abdstat_struct_size, 0);
wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
for (i = 0; i < MAX_ORDER; i++)
wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);
abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (abd_ksp != NULL) {
for (i = 0; i < MAX_ORDER; i++) {
snprintf(abd_stats.abdstat_scatter_orders[i].name,
KSTAT_STRLEN, "scatter_order_%d", i);
abd_stats.abdstat_scatter_orders[i].data_type =
KSTAT_DATA_UINT64;
}
abd_ksp->ks_data = &abd_stats;
abd_ksp->ks_update = abd_kstats_update;
kstat_install(abd_ksp);
}
abd_alloc_zero_scatter();
}
void
abd_fini(void)
{
abd_free_zero_scatter();
if (abd_ksp != NULL) {
kstat_delete(abd_ksp);
abd_ksp = NULL;
}
wmsum_fini(&abd_sums.abdstat_struct_size);
wmsum_fini(&abd_sums.abdstat_linear_cnt);
wmsum_fini(&abd_sums.abdstat_linear_data_size);
wmsum_fini(&abd_sums.abdstat_scatter_cnt);
wmsum_fini(&abd_sums.abdstat_scatter_data_size);
wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
for (int i = 0; i < MAX_ORDER; i++)
wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);
if (abd_cache) {
kmem_cache_destroy(abd_cache);
abd_cache = NULL;
}
}
void
abd_free_linear_page(abd_t *abd)
{
/* Transform it back into a scatter ABD for freeing */
struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
abd->abd_flags &= ~ABD_FLAG_LINEAR;
abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
ABD_SCATTER(abd).abd_nents = 1;
ABD_SCATTER(abd).abd_offset = 0;
ABD_SCATTER(abd).abd_sgl = sg;
abd_free_chunks(abd);
abd_update_scatter_stats(abd, ABDSTAT_DECR);
}
/*
* If we're going to use this ABD for doing I/O using the block layer, the
* consumer of the ABD data doesn't care if it's scattered or not, and we don't
* plan to store this ABD in memory for a long period of time, we should
* allocate the ABD type that requires the least data copying to do the I/O.
*
* On Linux the optimal thing to do would be to use abd_get_offset() and
* construct a new ABD which shares the original pages thereby eliminating
* the copy. But for the moment a new linear ABD is allocated until this
* performance optimization can be implemented.
*/
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
return (abd_alloc(size, is_metadata));
}
abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
size_t size)
{
int i = 0;
struct scatterlist *sg = NULL;
abd_verify(sabd);
ASSERT3U(off, <=, sabd->abd_size);
size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
if (abd == NULL)
abd = abd_alloc_struct(0);
/*
* Even if this buf is filesystem metadata, we only track that
* if we own the underlying data buffer, which is not true in
* this case. Therefore, we don't ever use ABD_FLAG_META here.
*/
abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
if (new_offset < sg->length)
break;
new_offset -= sg->length;
}
ABD_SCATTER(abd).abd_sgl = sg;
ABD_SCATTER(abd).abd_offset = new_offset;
ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;
return (abd);
}
/*
* Initialize the abd_iter.
*/
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
ASSERT(!abd_is_gang(abd));
abd_verify(abd);
aiter->iter_abd = abd;
aiter->iter_mapaddr = NULL;
aiter->iter_mapsize = 0;
aiter->iter_pos = 0;
if (abd_is_linear(abd)) {
aiter->iter_offset = 0;
aiter->iter_sg = NULL;
} else {
aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
}
}
/*
* This is just a helper function to see if we have exhausted the
* abd_iter and reached the end.
*/
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
return (aiter->iter_pos == aiter->iter_abd->abd_size);
}
/*
* Advance the iterator by a certain amount. Cannot be called when a chunk is
* in use. This can be safely called when the aiter has already been
* exhausted, in which case this does nothing.
*/
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
ASSERT3P(aiter->iter_mapaddr, ==, NULL);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to advance to, so do nothing */
if (abd_iter_at_end(aiter))
return;
aiter->iter_pos += amount;
aiter->iter_offset += amount;
if (!abd_is_linear(aiter->iter_abd)) {
while (aiter->iter_offset >= aiter->iter_sg->length) {
aiter->iter_offset -= aiter->iter_sg->length;
aiter->iter_sg = sg_next(aiter->iter_sg);
if (aiter->iter_sg == NULL) {
ASSERT0(aiter->iter_offset);
break;
}
}
}
}
/*
* Map the current chunk into aiter. This can be safely called when the aiter
* has already been exhausted, in which case this does nothing.
*/
void
abd_iter_map(struct abd_iter *aiter)
{
void *paddr;
size_t offset = 0;
ASSERT3P(aiter->iter_mapaddr, ==, NULL);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to iterate over, so do nothing */
if (abd_iter_at_end(aiter))
return;
if (abd_is_linear(aiter->iter_abd)) {
ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
offset = aiter->iter_offset;
aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
paddr = ABD_LINEAR_BUF(aiter->iter_abd);
} else {
offset = aiter->iter_offset;
aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
aiter->iter_abd->abd_size - aiter->iter_pos);
paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
}
aiter->iter_mapaddr = (char *)paddr + offset;
}
/*
* Unmap the current chunk from aiter. This can be safely called when the aiter
* has already been exhausted, in which case this does nothing.
*/
void
abd_iter_unmap(struct abd_iter *aiter)
{
/* There's nothing left to unmap, so do nothing */
if (abd_iter_at_end(aiter))
return;
if (!abd_is_linear(aiter->iter_abd)) {
/* LINTED E_FUNC_SET_NOT_USED */
zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
}
ASSERT3P(aiter->iter_mapaddr, !=, NULL);
ASSERT3U(aiter->iter_mapsize, >, 0);
aiter->iter_mapaddr = NULL;
aiter->iter_mapsize = 0;
}
void
abd_cache_reap_now(void)
{
}
#if defined(_KERNEL)
/*
* bio_nr_pages for ABD.
* @off is the offset in @abd
*/
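/*
* Example (illustrative, assuming 4KB pages): a request starting 16 bytes
* before a page boundary (pos = 0xff0) with size = 0x40 spans two pages,
* and the expression below yields ((0xff0 + 0x40 + 0xfff) >> 12) -
* (0xff0 >> 12) = 2.
*/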
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
unsigned long pos;
if (abd_is_gang(abd)) {
unsigned long count = 0;
for (abd_t *cabd = abd_gang_get_offset(abd, &off);
cabd != NULL && size != 0;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
ASSERT3U(off, <, cabd->abd_size);
int mysize = MIN(size, cabd->abd_size - off);
count += abd_nr_pages_off(cabd, mysize, off);
size -= mysize;
off = 0;
}
return (count);
}
if (abd_is_linear(abd))
pos = (unsigned long)abd_to_buf(abd) + off;
else
pos = ABD_SCATTER(abd).abd_offset + off;
return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
(pos >> PAGE_SHIFT));
}
static unsigned int
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
{
unsigned int offset, size, i;
struct page *page;
offset = offset_in_page(buf_ptr);
for (i = 0; i < bio->bi_max_vecs; i++) {
size = PAGE_SIZE - offset;
if (bio_size <= 0)
break;
if (size > bio_size)
size = bio_size;
if (is_vmalloc_addr(buf_ptr))
page = vmalloc_to_page(buf_ptr);
else
page = virt_to_page(buf_ptr);
/*
* Some network-related block devices use tcp_sendpage, which
* doesn't behave well with 0-count pages; this is a
* safety net to catch them.
*/
ASSERT3S(page_count(page), >, 0);
if (bio_add_page(bio, page, size, offset) != size)
break;
buf_ptr += size;
bio_size -= size;
offset = 0;
}
return (bio_size);
}
/*
* bio_map for gang ABD.
*/
static unsigned int
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
unsigned int io_size, size_t off)
{
ASSERT(abd_is_gang(abd));
for (abd_t *cabd = abd_gang_get_offset(abd, &off);
cabd != NULL;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
ASSERT3U(off, <, cabd->abd_size);
int size = MIN(io_size, cabd->abd_size - off);
int remainder = abd_bio_map_off(bio, cabd, size, off);
io_size -= (size - remainder);
if (io_size == 0 || remainder > 0)
return (io_size);
off = 0;
}
ASSERT0(io_size);
return (io_size);
}
/*
* bio_map for ABD.
* @off is the offset in @abd
* Remaining IO size is returned
*/
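/*
* Note (added for clarity): a non-zero return value means the bio ran out
* of vectors (bi_max_vecs) before the full io_size was mapped, so the
* caller can map the remainder into an additional bio.
*/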
unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
unsigned int io_size, size_t off)
{
struct abd_iter aiter;
ASSERT3U(io_size, <=, abd->abd_size - off);
if (abd_is_linear(abd))
return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));
ASSERT(!abd_is_linear(abd));
if (abd_is_gang(abd))
return (abd_gang_bio_map_off(bio, abd, io_size, off));
abd_iter_init(&aiter, abd);
abd_iter_advance(&aiter, off);
for (int i = 0; i < bio->bi_max_vecs; i++) {
struct page *pg;
size_t len, sgoff, pgoff;
struct scatterlist *sg;
if (io_size <= 0)
break;
sg = aiter.iter_sg;
sgoff = aiter.iter_offset;
pgoff = sgoff & (PAGESIZE - 1);
len = MIN(io_size, PAGESIZE - pgoff);
ASSERT(len > 0);
pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
if (bio_add_page(bio, pg, len, pgoff) != len)
break;
io_size -= len;
abd_iter_advance(&aiter, len);
}
return (io_size);
}
/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
"Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_min_size, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
"Minimum size of scatter allocations.");
/* CSTYLED */
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
"Maximum order allocation used for a scatter ABD.");
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/qat_compress.c b/sys/contrib/openzfs/module/os/linux/zfs/qat_compress.c
index ad3ead3b16e3..1d099c95bc7c 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/qat_compress.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/qat_compress.c
@@ -1,569 +1,550 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#if defined(_KERNEL) && defined(HAVE_QAT)
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/completion.h>
#include <sys/zfs_context.h>
#include <sys/byteorder.h>
#include <sys/zio.h>
#include <sys/qat.h>
/*
* Max instances in a QAT device. Each instance is a channel for
* submitting jobs to the QAT hardware. This is only used to pre-allocate
* the instance and session arrays; the actual number of instances is
* defined in the QAT driver's configuration file.
*/
#define QAT_DC_MAX_INSTANCES 48
/*
* ZLIB head and foot size
*/
#define ZLIB_HEAD_SZ 2
#define ZLIB_FOOT_SZ 4
static CpaInstanceHandle dc_inst_handles[QAT_DC_MAX_INSTANCES];
static CpaDcSessionHandle session_handles[QAT_DC_MAX_INSTANCES];
static CpaBufferList **buffer_array[QAT_DC_MAX_INSTANCES];
static Cpa16U num_inst = 0;
static Cpa32U inst_num = 0;
static boolean_t qat_dc_init_done = B_FALSE;
int zfs_qat_compress_disable = 0;
boolean_t
qat_dc_use_accel(size_t s_len)
{
return (!zfs_qat_compress_disable &&
qat_dc_init_done &&
s_len >= QAT_MIN_BUF_SIZE &&
s_len <= QAT_MAX_BUF_SIZE);
}
static void
qat_dc_callback(void *p_callback, CpaStatus status)
{
if (p_callback != NULL)
complete((struct completion *)p_callback);
}
static void
qat_dc_clean(void)
{
Cpa16U buff_num = 0;
Cpa16U num_inter_buff_lists = 0;
for (Cpa16U i = 0; i < num_inst; i++) {
cpaDcStopInstance(dc_inst_handles[i]);
QAT_PHYS_CONTIG_FREE(session_handles[i]);
/* free intermediate buffers */
if (buffer_array[i] != NULL) {
cpaDcGetNumIntermediateBuffers(
dc_inst_handles[i], &num_inter_buff_lists);
for (buff_num = 0; buff_num < num_inter_buff_lists;
buff_num++) {
CpaBufferList *buffer_inter =
buffer_array[i][buff_num];
if (buffer_inter->pBuffers) {
QAT_PHYS_CONTIG_FREE(
buffer_inter->pBuffers->pData);
QAT_PHYS_CONTIG_FREE(
buffer_inter->pBuffers);
}
QAT_PHYS_CONTIG_FREE(
buffer_inter->pPrivateMetaData);
QAT_PHYS_CONTIG_FREE(buffer_inter);
}
}
}
num_inst = 0;
qat_dc_init_done = B_FALSE;
}
int
qat_dc_init(void)
{
CpaStatus status = CPA_STATUS_SUCCESS;
Cpa32U sess_size = 0;
Cpa32U ctx_size = 0;
Cpa16U num_inter_buff_lists = 0;
Cpa16U buff_num = 0;
Cpa32U buff_meta_size = 0;
CpaDcSessionSetupData sd = {0};
if (qat_dc_init_done)
return (0);
status = cpaDcGetNumInstances(&num_inst);
if (status != CPA_STATUS_SUCCESS)
return (-1);
/* if the user has configured no QAT compression units just return */
if (num_inst == 0)
return (0);
if (num_inst > QAT_DC_MAX_INSTANCES)
num_inst = QAT_DC_MAX_INSTANCES;
status = cpaDcGetInstances(num_inst, &dc_inst_handles[0]);
if (status != CPA_STATUS_SUCCESS)
return (-1);
for (Cpa16U i = 0; i < num_inst; i++) {
cpaDcSetAddressTranslation(dc_inst_handles[i],
(void*)virt_to_phys);
status = cpaDcBufferListGetMetaSize(dc_inst_handles[i],
1, &buff_meta_size);
if (status == CPA_STATUS_SUCCESS)
status = cpaDcGetNumIntermediateBuffers(
dc_inst_handles[i], &num_inter_buff_lists);
if (status == CPA_STATUS_SUCCESS && num_inter_buff_lists != 0)
status = QAT_PHYS_CONTIG_ALLOC(&buffer_array[i],
num_inter_buff_lists *
sizeof (CpaBufferList *));
for (buff_num = 0; buff_num < num_inter_buff_lists;
buff_num++) {
if (status == CPA_STATUS_SUCCESS)
status = QAT_PHYS_CONTIG_ALLOC(
&buffer_array[i][buff_num],
sizeof (CpaBufferList));
if (status == CPA_STATUS_SUCCESS)
status = QAT_PHYS_CONTIG_ALLOC(
&buffer_array[i][buff_num]->
pPrivateMetaData,
buff_meta_size);
if (status == CPA_STATUS_SUCCESS)
status = QAT_PHYS_CONTIG_ALLOC(
&buffer_array[i][buff_num]->pBuffers,
sizeof (CpaFlatBuffer));
if (status == CPA_STATUS_SUCCESS) {
/*
* The implementation requires an
* intermediate buffer approximately twice
* the size of the output buffer, which is
* 2x the max buffer size here.
*/
status = QAT_PHYS_CONTIG_ALLOC(
&buffer_array[i][buff_num]->pBuffers->
pData, 2 * QAT_MAX_BUF_SIZE);
if (status != CPA_STATUS_SUCCESS)
goto fail;
buffer_array[i][buff_num]->numBuffers = 1;
buffer_array[i][buff_num]->pBuffers->
dataLenInBytes = 2 * QAT_MAX_BUF_SIZE;
}
}
status = cpaDcStartInstance(dc_inst_handles[i],
num_inter_buff_lists, buffer_array[i]);
if (status != CPA_STATUS_SUCCESS)
goto fail;
sd.compLevel = CPA_DC_L1;
sd.compType = CPA_DC_DEFLATE;
sd.huffType = CPA_DC_HT_FULL_DYNAMIC;
sd.sessDirection = CPA_DC_DIR_COMBINED;
sd.sessState = CPA_DC_STATELESS;
sd.deflateWindowSize = 7;
sd.checksum = CPA_DC_ADLER32;
status = cpaDcGetSessionSize(dc_inst_handles[i],
&sd, &sess_size, &ctx_size);
if (status != CPA_STATUS_SUCCESS)
goto fail;
QAT_PHYS_CONTIG_ALLOC(&session_handles[i], sess_size);
if (session_handles[i] == NULL)
goto fail;
status = cpaDcInitSession(dc_inst_handles[i],
session_handles[i],
&sd, NULL, qat_dc_callback);
if (status != CPA_STATUS_SUCCESS)
goto fail;
}
qat_dc_init_done = B_TRUE;
return (0);
fail:
qat_dc_clean();
return (-1);
}
void
qat_dc_fini(void)
{
if (!qat_dc_init_done)
return;
qat_dc_clean();
}
/*
* The "add" parameter is an additional buffer which is passed
* to QAT as a scratch buffer alongside the destination buffer
* in case the "compressed" data ends up being larger than the
* original source data. This is necessary to prevent QAT from
* generating buffer overflow warnings for incompressible data.
*/
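/*
* Note (added for clarity): qat_compress() below allocates this scratch
* buffer with zio_data_buf_alloc(dst_len) for the compress direction only,
* so the destination buffer list handed to QAT covers roughly twice the
* caller's output buffer.
*/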
static int
qat_compress_impl(qat_compress_dir_t dir, char *src, int src_len,
char *dst, int dst_len, char *add, int add_len, size_t *c_len)
{
CpaInstanceHandle dc_inst_handle;
CpaDcSessionHandle session_handle;
CpaBufferList *buf_list_src = NULL;
CpaBufferList *buf_list_dst = NULL;
CpaFlatBuffer *flat_buf_src = NULL;
CpaFlatBuffer *flat_buf_dst = NULL;
Cpa8U *buffer_meta_src = NULL;
Cpa8U *buffer_meta_dst = NULL;
Cpa32U buffer_meta_size = 0;
CpaDcRqResults dc_results;
CpaStatus status = CPA_STATUS_FAIL;
Cpa32U hdr_sz = 0;
Cpa32U compressed_sz;
Cpa32U num_src_buf = (src_len >> PAGE_SHIFT) + 2;
Cpa32U num_dst_buf = (dst_len >> PAGE_SHIFT) + 2;
Cpa32U num_add_buf = (add_len >> PAGE_SHIFT) + 2;
Cpa32U bytes_left;
Cpa32U dst_pages = 0;
Cpa32U adler32 = 0;
char *data;
struct page *page;
struct page **in_pages = NULL;
struct page **out_pages = NULL;
struct page **add_pages = NULL;
Cpa32U page_off = 0;
struct completion complete;
Cpa32U page_num = 0;
Cpa16U i;
/*
* We increment num_src_buf and num_dst_buf by 2 to allow
* us to handle non page-aligned buffer addresses and buffers
* whose sizes are not divisible by PAGE_SIZE.
*/
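/*
* Example (illustrative, assuming 4KB pages): a 128KB source buffer that
* starts in the middle of a page is split across 33 pages, so the
* (src_len >> PAGE_SHIFT) + 2 == 34 flat buffers reserved above are
* always sufficient.
*/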
Cpa32U src_buffer_list_mem_size = sizeof (CpaBufferList) +
(num_src_buf * sizeof (CpaFlatBuffer));
Cpa32U dst_buffer_list_mem_size = sizeof (CpaBufferList) +
((num_dst_buf + num_add_buf) * sizeof (CpaFlatBuffer));
status = QAT_PHYS_CONTIG_ALLOC(&in_pages,
num_src_buf * sizeof (struct page *));
if (status != CPA_STATUS_SUCCESS)
goto fail;
status = QAT_PHYS_CONTIG_ALLOC(&out_pages,
num_dst_buf * sizeof (struct page *));
if (status != CPA_STATUS_SUCCESS)
goto fail;
status = QAT_PHYS_CONTIG_ALLOC(&add_pages,
num_add_buf * sizeof (struct page *));
if (status != CPA_STATUS_SUCCESS)
goto fail;
i = (Cpa32U)atomic_inc_32_nv(&inst_num) % num_inst;
dc_inst_handle = dc_inst_handles[i];
session_handle = session_handles[i];
cpaDcBufferListGetMetaSize(dc_inst_handle, num_src_buf,
&buffer_meta_size);
status = QAT_PHYS_CONTIG_ALLOC(&buffer_meta_src, buffer_meta_size);
if (status != CPA_STATUS_SUCCESS)
goto fail;
cpaDcBufferListGetMetaSize(dc_inst_handle, num_dst_buf + num_add_buf,
&buffer_meta_size);
status = QAT_PHYS_CONTIG_ALLOC(&buffer_meta_dst, buffer_meta_size);
if (status != CPA_STATUS_SUCCESS)
goto fail;
/* build source buffer list */
status = QAT_PHYS_CONTIG_ALLOC(&buf_list_src, src_buffer_list_mem_size);
if (status != CPA_STATUS_SUCCESS)
goto fail;
flat_buf_src = (CpaFlatBuffer *)(buf_list_src + 1);
buf_list_src->pBuffers = flat_buf_src; /* always point to first one */
/* build destination buffer list */
status = QAT_PHYS_CONTIG_ALLOC(&buf_list_dst, dst_buffer_list_mem_size);
if (status != CPA_STATUS_SUCCESS)
goto fail;
flat_buf_dst = (CpaFlatBuffer *)(buf_list_dst + 1);
buf_list_dst->pBuffers = flat_buf_dst; /* always point to first one */
buf_list_src->numBuffers = 0;
buf_list_src->pPrivateMetaData = buffer_meta_src;
bytes_left = src_len;
data = src;
page_num = 0;
while (bytes_left > 0) {
page_off = ((long)data & ~PAGE_MASK);
page = qat_mem_to_page(data);
in_pages[page_num] = page;
flat_buf_src->pData = kmap(page) + page_off;
flat_buf_src->dataLenInBytes =
min((long)PAGE_SIZE - page_off, (long)bytes_left);
bytes_left -= flat_buf_src->dataLenInBytes;
data += flat_buf_src->dataLenInBytes;
flat_buf_src++;
buf_list_src->numBuffers++;
page_num++;
}
buf_list_dst->numBuffers = 0;
buf_list_dst->pPrivateMetaData = buffer_meta_dst;
bytes_left = dst_len;
data = dst;
page_num = 0;
while (bytes_left > 0) {
page_off = ((long)data & ~PAGE_MASK);
page = qat_mem_to_page(data);
flat_buf_dst->pData = kmap(page) + page_off;
out_pages[page_num] = page;
flat_buf_dst->dataLenInBytes =
min((long)PAGE_SIZE - page_off, (long)bytes_left);
bytes_left -= flat_buf_dst->dataLenInBytes;
data += flat_buf_dst->dataLenInBytes;
flat_buf_dst++;
buf_list_dst->numBuffers++;
page_num++;
dst_pages++;
}
/* map additional scratch pages into the destination buffer list */
bytes_left = add_len;
data = add;
page_num = 0;
while (bytes_left > 0) {
page_off = ((long)data & ~PAGE_MASK);
page = qat_mem_to_page(data);
flat_buf_dst->pData = kmap(page) + page_off;
add_pages[page_num] = page;
flat_buf_dst->dataLenInBytes =
min((long)PAGE_SIZE - page_off, (long)bytes_left);
bytes_left -= flat_buf_dst->dataLenInBytes;
data += flat_buf_dst->dataLenInBytes;
flat_buf_dst++;
buf_list_dst->numBuffers++;
page_num++;
}
init_completion(&complete);
if (dir == QAT_COMPRESS) {
QAT_STAT_BUMP(comp_requests);
QAT_STAT_INCR(comp_total_in_bytes, src_len);
cpaDcGenerateHeader(session_handle,
buf_list_dst->pBuffers, &hdr_sz);
buf_list_dst->pBuffers->pData += hdr_sz;
buf_list_dst->pBuffers->dataLenInBytes -= hdr_sz;
status = cpaDcCompressData(
dc_inst_handle, session_handle,
buf_list_src, buf_list_dst,
&dc_results, CPA_DC_FLUSH_FINAL,
&complete);
if (status != CPA_STATUS_SUCCESS) {
goto fail;
}
/* we now wait until the completion of the operation. */
wait_for_completion(&complete);
if (dc_results.status != CPA_STATUS_SUCCESS) {
status = CPA_STATUS_FAIL;
goto fail;
}
compressed_sz = dc_results.produced;
if (compressed_sz + hdr_sz + ZLIB_FOOT_SZ > dst_len) {
status = CPA_STATUS_INCOMPRESSIBLE;
goto fail;
}
- flat_buf_dst = (CpaFlatBuffer *)(buf_list_dst + 1);
- /* move to the last page */
- flat_buf_dst += (compressed_sz + hdr_sz) >> PAGE_SHIFT;
+ /* get adler32 checksum and append footer */
+ *(Cpa32U*)(dst + hdr_sz + compressed_sz) =
+ BSWAP_32(dc_results.checksum);
- /* no space for gzip footer in the last page */
- if (((compressed_sz + hdr_sz) % PAGE_SIZE)
- + ZLIB_FOOT_SZ > PAGE_SIZE) {
- status = CPA_STATUS_INCOMPRESSIBLE;
- goto fail;
- }
-
- /* jump to the end of the buffer and append footer */
- flat_buf_dst->pData =
- (char *)((unsigned long)flat_buf_dst->pData & PAGE_MASK)
- + ((compressed_sz + hdr_sz) % PAGE_SIZE);
- flat_buf_dst->dataLenInBytes = ZLIB_FOOT_SZ;
-
- dc_results.produced = 0;
- status = cpaDcGenerateFooter(session_handle,
- flat_buf_dst, &dc_results);
- if (status != CPA_STATUS_SUCCESS)
- goto fail;
-
- *c_len = compressed_sz + dc_results.produced + hdr_sz;
+ *c_len = hdr_sz + compressed_sz + ZLIB_FOOT_SZ;
QAT_STAT_INCR(comp_total_out_bytes, *c_len);
} else {
ASSERT3U(dir, ==, QAT_DECOMPRESS);
QAT_STAT_BUMP(decomp_requests);
QAT_STAT_INCR(decomp_total_in_bytes, src_len);
buf_list_src->pBuffers->pData += ZLIB_HEAD_SZ;
buf_list_src->pBuffers->dataLenInBytes -= ZLIB_HEAD_SZ;
status = cpaDcDecompressData(dc_inst_handle, session_handle,
buf_list_src, buf_list_dst, &dc_results, CPA_DC_FLUSH_FINAL,
&complete);
if (CPA_STATUS_SUCCESS != status) {
status = CPA_STATUS_FAIL;
goto fail;
}
/* we now wait until the completion of the operation. */
wait_for_completion(&complete);
if (dc_results.status != CPA_STATUS_SUCCESS) {
status = CPA_STATUS_FAIL;
goto fail;
}
/* verify adler checksum */
adler32 = *(Cpa32U *)(src + dc_results.consumed + ZLIB_HEAD_SZ);
if (adler32 != BSWAP_32(dc_results.checksum)) {
status = CPA_STATUS_FAIL;
goto fail;
}
*c_len = dc_results.produced;
QAT_STAT_INCR(decomp_total_out_bytes, *c_len);
}
fail:
if (status != CPA_STATUS_SUCCESS && status != CPA_STATUS_INCOMPRESSIBLE)
QAT_STAT_BUMP(dc_fails);
if (in_pages) {
for (page_num = 0;
page_num < buf_list_src->numBuffers;
page_num++) {
kunmap(in_pages[page_num]);
}
QAT_PHYS_CONTIG_FREE(in_pages);
}
if (out_pages) {
for (page_num = 0; page_num < dst_pages; page_num++) {
kunmap(out_pages[page_num]);
}
QAT_PHYS_CONTIG_FREE(out_pages);
}
if (add_pages) {
for (page_num = 0;
page_num < buf_list_dst->numBuffers - dst_pages;
page_num++) {
kunmap(add_pages[page_num]);
}
QAT_PHYS_CONTIG_FREE(add_pages);
}
QAT_PHYS_CONTIG_FREE(buffer_meta_src);
QAT_PHYS_CONTIG_FREE(buffer_meta_dst);
QAT_PHYS_CONTIG_FREE(buf_list_src);
QAT_PHYS_CONTIG_FREE(buf_list_dst);
return (status);
}
/*
* Entry point for QAT accelerated compression / decompression.
*/
int
qat_compress(qat_compress_dir_t dir, char *src, int src_len,
char *dst, int dst_len, size_t *c_len)
{
int ret;
size_t add_len = 0;
void *add = NULL;
if (dir == QAT_COMPRESS) {
add_len = dst_len;
add = zio_data_buf_alloc(add_len);
}
ret = qat_compress_impl(dir, src, src_len, dst,
dst_len, add, add_len, c_len);
if (dir == QAT_COMPRESS)
zio_data_buf_free(add, add_len);
return (ret);
}
static int
param_set_qat_compress(const char *val, zfs_kernel_param_t *kp)
{
int ret;
int *pvalue = kp->arg;
ret = param_set_int(val, kp);
if (ret)
return (ret);
/*
* zfs_qat_compress_disable = 0 means QAT compression is enabled; try to
* initialize the QAT instances if that has not been done yet.
*/
if (*pvalue == 0 && !qat_dc_init_done) {
ret = qat_dc_init();
if (ret != 0) {
zfs_qat_compress_disable = 1;
return (ret);
}
}
return (ret);
}
module_param_call(zfs_qat_compress_disable, param_set_qat_compress,
param_get_int, &zfs_qat_compress_disable, 0644);
MODULE_PARM_DESC(zfs_qat_compress_disable, "Enable/Disable QAT compression");
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
index c56fd3a6ff21..59d062ebe2a6 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
@@ -1,928 +1,930 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/blkpg.h>
#include <linux/msdos_fs.h>
#include <linux/vfs_compat.h>
typedef struct vdev_disk {
struct block_device *vd_bdev;
krwlock_t vd_lock;
} vdev_disk_t;
/*
* Unique identifier for the exclusive vdev holder.
*/
static void *zfs_vdev_holder = VDEV_HOLDER;
/*
* Wait up to zfs_vdev_open_timeout_ms milliseconds before determining the
* device is missing. The missing path may be transient since the links
* can be briefly removed and recreated in response to udev events.
*/
static unsigned zfs_vdev_open_timeout_ms = 1000;
/*
* Size of the "reserved" partition, in blocks.
*/
#define EFI_MIN_RESV_SIZE (16 * 1024)
/*
* Virtual device vector for disks.
*/
typedef struct dio_request {
zio_t *dr_zio; /* Parent ZIO */
atomic_t dr_ref; /* References */
int dr_error; /* Bio error */
int dr_bio_count; /* Count of bio's */
struct bio *dr_bio[0]; /* Attached bio's */
} dio_request_t;
static fmode_t
vdev_bdev_mode(spa_mode_t spa_mode)
{
fmode_t mode = 0;
if (spa_mode & SPA_MODE_READ)
mode |= FMODE_READ;
if (spa_mode & SPA_MODE_WRITE)
mode |= FMODE_WRITE;
return (mode);
}
/*
* Returns the usable capacity (in bytes) for the partition or disk.
*/
static uint64_t
bdev_capacity(struct block_device *bdev)
{
return (i_size_read(bdev->bd_inode));
}
#if !defined(HAVE_BDEV_WHOLE)
static inline struct block_device *
bdev_whole(struct block_device *bdev)
{
return (bdev->bd_contains);
}
#endif
/*
* Returns the maximum expansion capacity of the block device (in bytes).
*
* It is possible to expand a vdev when it has been created as a wholedisk
* and the containing block device has increased in capacity. Or when the
* partition containing the pool has been manually increased in size.
*
* This function is only responsible for calculating the potential expansion
* size so it can be reported by 'zpool list'. The efi_use_whole_disk() is
* responsible for verifying the expected partition layout in the wholedisk
* case, and updating the partition table if appropriate. Once the partition
* size has been increased the additional capacity will be visible using
* bdev_capacity().
*
* The returned maximum expansion capacity is always expected to be larger, or
* at the very least equal, to its usable capacity to prevent overestimating
* the pool expandsize.
*/
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
uint64_t psize;
int64_t available;
if (wholedisk && bdev != bdev_whole(bdev)) {
/*
* When reporting maximum expansion capacity for a wholedisk
* deduct any capacity which is expected to be lost due to
* alignment restrictions. Over-reporting this value isn't
* harmful and would only result in slightly less capacity
* than expected post-expansion.
* The estimated available space may be slightly smaller than
* bdev_capacity() for devices where the number of sectors is
* not a multiple of the alignment size and the partition layout
* is keeping less than PARTITION_END_ALIGNMENT bytes after the
* "reserved" EFI partition: in such cases return the device
* usable capacity.
*/
available = i_size_read(bdev_whole(bdev)->bd_inode) -
((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
PARTITION_END_ALIGNMENT) << SECTOR_BITS);
psize = MAX(available, bdev_capacity(bdev));
} else {
psize = bdev_capacity(bdev);
}
return (psize);
}
static void
vdev_disk_error(zio_t *zio)
{
/*
* This function can be called in interrupt context, for instance while
* handling IRQs coming from a misbehaving disk device; use printk()
* which is safe from any context.
*/
printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
"offset=%llu size=%llu flags=%x\n", spa_name(zio->io_spa),
zio->io_vd->vdev_path, zio->io_error, zio->io_type,
(u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
zio->io_flags);
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
struct block_device *bdev;
fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);
vdev_disk_t *vd;
/* Must have a pathname and it must be absolute. */
if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
vdev_dbgmsg(v, "invalid vdev_path");
return (SET_ERROR(EINVAL));
}
/*
* Reopen the device if it is currently open. When expanding a
* partition, force re-scanning the partition table if userland
* did not take care of this already. We need to do this while closed
* in order to get an accurate updated block device size. Then
* since udev may need to recreate the device links increase the
* open retry timeout before reporting the device as unavailable.
*/
vd = v->vdev_tsd;
if (vd) {
char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
boolean_t reread_part = B_FALSE;
rw_enter(&vd->vd_lock, RW_WRITER);
bdev = vd->vd_bdev;
vd->vd_bdev = NULL;
if (bdev) {
if (v->vdev_expanding && bdev != bdev_whole(bdev)) {
bdevname(bdev_whole(bdev), disk_name + 5);
/*
* If userland has BLKPG_RESIZE_PARTITION,
* then it should have updated the partition
* table already. We can detect this by
* comparing our current physical size
* with that of the device. If they are
* the same, then we must not have
* BLKPG_RESIZE_PARTITION or it failed to
* update the partition table online. We
* fallback to rescanning the partition
* table from the kernel below. However,
* if the capacity already reflects the
* updated partition, then we skip
* rescanning the partition table here.
*/
if (v->vdev_psize == bdev_capacity(bdev))
reread_part = B_TRUE;
}
blkdev_put(bdev, mode | FMODE_EXCL);
}
if (reread_part) {
bdev = blkdev_get_by_path(disk_name, mode | FMODE_EXCL,
zfs_vdev_holder);
if (!IS_ERR(bdev)) {
int error = vdev_bdev_reread_part(bdev);
blkdev_put(bdev, mode | FMODE_EXCL);
if (error == 0) {
timeout = MSEC2NSEC(
zfs_vdev_open_timeout_ms * 2);
}
}
}
} else {
vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);
rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
rw_enter(&vd->vd_lock, RW_WRITER);
}
/*
* Devices are always opened by the path provided at configuration
* time. This means that if the provided path is a udev by-id path
* then drives may be re-cabled without an issue. If the provided
* path is a udev by-path path, then the physical location information
* will be preserved. This can be critical for more complicated
* configurations where drives are located in specific physical
* locations to maximize the system's tolerance to component failure.
*
* Alternatively, you can provide your own udev rule to flexibly map
* the drives as you see fit. It is not advised that you use the
* /dev/[hd]d devices which may be reordered due to probing order.
* Devices in the wrong locations will be detected by the higher
* level vdev validation.
*
* The specified paths may be briefly removed and recreated in
* response to udev events. This should be exceptionally unlikely
* because the zpool command makes every effort to verify these paths
* have already settled prior to reaching this point. Therefore,
* an ENOENT failure at this point is highly likely to be transient
* and it is reasonable to sleep and retry before giving up. In
* practice delays have been observed to be on the order of 100ms.
*/
hrtime_t start = gethrtime();
bdev = ERR_PTR(-ENXIO);
while (IS_ERR(bdev) && ((gethrtime() - start) < timeout)) {
bdev = blkdev_get_by_path(v->vdev_path, mode | FMODE_EXCL,
zfs_vdev_holder);
if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
schedule_timeout(MSEC_TO_TICK(10));
} else if (IS_ERR(bdev)) {
break;
}
}
if (IS_ERR(bdev)) {
int error = -PTR_ERR(bdev);
vdev_dbgmsg(v, "open error=%d timeout=%llu/%llu", error,
(u_longlong_t)(gethrtime() - start),
(u_longlong_t)timeout);
vd->vd_bdev = NULL;
v->vdev_tsd = vd;
rw_exit(&vd->vd_lock);
return (SET_ERROR(error));
} else {
vd->vd_bdev = bdev;
v->vdev_tsd = vd;
rw_exit(&vd->vd_lock);
}
struct request_queue *q = bdev_get_queue(vd->vd_bdev);
/* Determine the physical block size */
int physical_block_size = bdev_physical_block_size(vd->vd_bdev);
/* Determine the logical block size */
int logical_block_size = bdev_logical_block_size(vd->vd_bdev);
/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
v->vdev_nowritecache = B_FALSE;
/* Set when device reports it supports TRIM. */
v->vdev_has_trim = !!blk_queue_discard(q);
/* Set when device reports it supports secure TRIM. */
v->vdev_has_securetrim = !!blk_queue_discard_secure(q);
/* Inform the ZIO pipeline that we are non-rotational */
v->vdev_nonrot = blk_queue_nonrot(q);
/* Physical volume size in bytes for the partition */
*psize = bdev_capacity(vd->vd_bdev);
/* Physical volume size in bytes including possible expansion space */
*max_psize = bdev_max_capacity(vd->vd_bdev, v->vdev_wholedisk);
/* Based on the minimum sector size set the block size */
*physical_ashift = highbit64(MAX(physical_block_size,
SPA_MINBLOCKSIZE)) - 1;
*logical_ashift = highbit64(MAX(logical_block_size,
SPA_MINBLOCKSIZE)) - 1;
return (0);
}
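/*
* Minimal userspace sketch (illustrative only) of how the two ashift
* values above follow from the reported sector sizes. The
* example_highbit64() helper is a hypothetical stand-in for the
* kernel's highbit64(), which returns the index of the highest set bit
* plus one; 512 is assumed for SPA_MINBLOCKSIZE.
*/
#include <stdint.h>
static uint64_t
example_highbit64(uint64_t v)
{
uint64_t h = 0;
while (v != 0) {
v >>= 1;
h++;
}
return (h);
}
static uint64_t
example_ashift(uint64_t block_size)
{
const uint64_t min_block_size = 512; /* assumed SPA_MINBLOCKSIZE */
if (block_size < min_block_size)
block_size = min_block_size;
/* 512-byte sectors -> ashift 9, 4096-byte sectors -> ashift 12 */
return (example_highbit64(block_size) - 1);
}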
static void
vdev_disk_close(vdev_t *v)
{
vdev_disk_t *vd = v->vdev_tsd;
if (v->vdev_reopening || vd == NULL)
return;
if (vd->vd_bdev != NULL) {
blkdev_put(vd->vd_bdev,
vdev_bdev_mode(spa_mode(v->vdev_spa)) | FMODE_EXCL);
}
rw_destroy(&vd->vd_lock);
kmem_free(vd, sizeof (vdev_disk_t));
v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
dio_request_t *dr = kmem_zalloc(sizeof (dio_request_t) +
sizeof (struct bio *) * bio_count, KM_SLEEP);
atomic_set(&dr->dr_ref, 0);
dr->dr_bio_count = bio_count;
dr->dr_error = 0;
for (int i = 0; i < dr->dr_bio_count; i++)
dr->dr_bio[i] = NULL;
return (dr);
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
int i;
for (i = 0; i < dr->dr_bio_count; i++)
if (dr->dr_bio[i])
bio_put(dr->dr_bio[i]);
kmem_free(dr, sizeof (dio_request_t) +
sizeof (struct bio *) * dr->dr_bio_count);
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
atomic_inc(&dr->dr_ref);
}
static int
vdev_disk_dio_put(dio_request_t *dr)
{
int rc = atomic_dec_return(&dr->dr_ref);
/*
* Free the dio_request when the last reference is dropped and
* ensure zio_interpret is called only once with the correct zio
*/
if (rc == 0) {
zio_t *zio = dr->dr_zio;
int error = dr->dr_error;
vdev_disk_dio_free(dr);
if (zio) {
zio->io_error = error;
ASSERT3S(zio->io_error, >=, 0);
if (zio->io_error)
vdev_disk_error(zio);
zio_delay_interrupt(zio);
}
}
return (rc);
}
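/*
* Reference lifecycle, summarized for clarity: __vdev_disk_physio()
* takes one reference per attached bio plus one extra reference that
* protects the dio_request while the bio's are submitted. Each
* completion callback drops one reference, as does the final put in
* __vdev_disk_physio(); whichever put brings the count to zero frees
* the dio_request and signals the parent zio exactly once.
*/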
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
dio_request_t *dr = bio->bi_private;
int rc;
if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
dr->dr_error = BIO_END_IO_ERROR(bio);
#else
if (error)
dr->dr_error = -(error);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
dr->dr_error = EIO;
#endif
}
/* Drop reference acquired by __vdev_disk_physio */
rc = vdev_disk_dio_put(dr);
}
static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
submit_bio(bio);
#else
submit_bio(0, bio);
#endif
}
/*
* preempt_schedule_notrace is GPL-only which breaks the ZFS build, so
* replace it with preempt_schedule under the following condition:
*/
#if defined(CONFIG_ARM64) && \
defined(CONFIG_PREEMPTION) && \
defined(CONFIG_BLK_CGROUP)
#define preempt_schedule_notrace(x) preempt_schedule(x)
#endif
#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
* The Linux 5.5 kernel updated percpu_ref_tryget() which is inlined by
* blkg_tryget() to use rcu_read_lock() instead of rcu_read_lock_sched().
* As a side effect the function was converted to GPL-only. Define our
* own version when needed which uses rcu_read_lock_sched().
*/
#if defined(HAVE_BLKG_TRYGET_GPL_ONLY)
static inline bool
vdev_blkg_tryget(struct blkcg_gq *blkg)
{
struct percpu_ref *ref = &blkg->refcnt;
unsigned long __percpu *count;
bool rc;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &count)) {
this_cpu_inc(*count);
rc = true;
} else {
#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
rc = atomic_long_inc_not_zero(&ref->data->count);
#else
rc = atomic_long_inc_not_zero(&ref->count);
#endif
}
rcu_read_unlock_sched();
return (rc);
}
#elif defined(HAVE_BLKG_TRYGET)
#define vdev_blkg_tryget(bg) blkg_tryget(bg)
#endif
/*
* The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
* GPL-only bio_associate_blkg() symbol, thus inadvertently converting
* the entire macro to GPL-only. Provide a minimal version which always
* assigns the request queue's root_blkg to the bio.
*/
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
#if defined(HAVE_BIO_BDEV_DISK)
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
struct request_queue *q = bio->bi_disk->queue;
#endif
ASSERT3P(q, !=, NULL);
ASSERT3P(bio->bi_blkg, ==, NULL);
if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
bio->bi_blkg = q->root_blkg;
}
#define bio_associate_blkg vdev_bio_associate_blkg
#endif
#else
/*
* Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
*/
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */
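/*
* Note: when the caller is already inside the block layer's
* make_request path, submit_bio() would queue the bio on
* current->bio_list instead of issuing it. Temporarily detaching the
* list below forces immediate submission, presumably to avoid
* deadlocks when ZFS I/O is initiated from such a context.
*/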
static inline void
vdev_submit_bio(struct bio *bio)
{
struct bio_list *bio_list = current->bio_list;
current->bio_list = NULL;
vdev_submit_bio_impl(bio);
current->bio_list = bio_list;
}
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
size_t io_size, uint64_t io_offset, int rw, int flags)
{
dio_request_t *dr;
uint64_t abd_offset;
uint64_t bio_offset;
int bio_size;
int bio_count = 16;
int error = 0;
struct blk_plug plug;
/*
* Accessing outside the block device is never allowed.
*/
if (io_offset + io_size > bdev->bd_inode->i_size) {
vdev_dbgmsg(zio->io_vd,
"Illegal access %llu size %llu, device size %llu",
- io_offset, io_size, i_size_read(bdev->bd_inode));
+ (u_longlong_t)io_offset,
+ (u_longlong_t)io_size,
+ (u_longlong_t)i_size_read(bdev->bd_inode));
return (SET_ERROR(EIO));
}
retry:
dr = vdev_disk_dio_alloc(bio_count);
if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
bio_set_flags_failfast(bdev, &flags);
dr->dr_zio = zio;
/*
* Since bio's can have up to BIO_MAX_PAGES=256 iovec's, each of which
* is at least 512 bytes and at most PAGESIZE (typically 4K), one bio
* can cover at least 128KB and at most 1MB. When the required number
* of iovec's exceeds this, we are forced to break the IO in multiple
* bio's and wait for them all to complete. This is likely if the
* recordsize property is increased beyond 1MB. The default
* bio_count=16 should typically accommodate the maximum-size zio of
* 16MB.
*/
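/*
* Worked example: a 16MB zio backed by 4K pages needs 4096 iovec's,
* which at BIO_MAX_PAGES=256 iovec's per bio is exactly 16 bio's, so
* the initial bio_count above suffices and the retry path below is
* rarely taken.
*/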
abd_offset = 0;
bio_offset = io_offset;
bio_size = io_size;
for (int i = 0; i <= dr->dr_bio_count; i++) {
/* Finished constructing bio's for given buffer */
if (bio_size <= 0)
break;
/*
* If additional bio's are required, we have to retry, but
* this should be rare - see the comment above.
*/
if (dr->dr_bio_count == i) {
vdev_disk_dio_free(dr);
bio_count *= 2;
goto retry;
}
/* bio_alloc() with __GFP_WAIT never returns NULL */
#ifdef HAVE_BIO_MAX_SEGS
dr->dr_bio[i] = bio_alloc(GFP_NOIO, bio_max_segs(
abd_nr_pages_off(zio->io_abd, bio_size, abd_offset)));
#else
dr->dr_bio[i] = bio_alloc(GFP_NOIO,
MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
BIO_MAX_PAGES));
#endif
if (unlikely(dr->dr_bio[i] == NULL)) {
vdev_disk_dio_free(dr);
return (SET_ERROR(ENOMEM));
}
/* Matching put called by vdev_disk_physio_completion */
vdev_disk_dio_get(dr);
bio_set_dev(dr->dr_bio[i], bdev);
BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
dr->dr_bio[i]->bi_private = dr;
bio_set_op_attrs(dr->dr_bio[i], rw, flags);
/* Remaining size is returned to become the new size */
bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
bio_size, abd_offset);
/* Advance in buffer and construct another bio if needed */
abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
}
/* Extra reference to protect dio_request during vdev_submit_bio */
vdev_disk_dio_get(dr);
if (dr->dr_bio_count > 1)
blk_start_plug(&plug);
/* Submit all bio's associated with this dio */
for (int i = 0; i < dr->dr_bio_count; i++) {
if (dr->dr_bio[i])
vdev_submit_bio(dr->dr_bio[i]);
}
if (dr->dr_bio_count > 1)
blk_finish_plug(&plug);
(void) vdev_disk_dio_put(dr);
return (error);
}
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
zio->io_error = BIO_END_IO_ERROR(bio);
#else
zio->io_error = -error;
#endif
if (zio->io_error && (zio->io_error == EOPNOTSUPP))
zio->io_vd->vdev_nowritecache = B_TRUE;
bio_put(bio);
ASSERT3S(zio->io_error, >=, 0);
if (zio->io_error)
vdev_disk_error(zio);
zio_interrupt(zio);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
struct request_queue *q;
struct bio *bio;
q = bdev_get_queue(bdev);
if (!q)
return (SET_ERROR(ENXIO));
bio = bio_alloc(GFP_NOIO, 0);
/* bio_alloc() with __GFP_WAIT never returns NULL */
if (unlikely(bio == NULL))
return (SET_ERROR(ENOMEM));
bio->bi_end_io = vdev_disk_io_flush_completion;
bio->bi_private = zio;
bio_set_dev(bio, bdev);
bio_set_flush(bio);
vdev_submit_bio(bio);
invalidate_bdev(bdev);
return (0);
}
static void
vdev_disk_io_start(zio_t *zio)
{
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
unsigned long trim_flags = 0;
int rw, error;
/*
* If the vdev is closed, it's likely in the REMOVED or FAULTED state.
* Nothing to be done here but return failure.
*/
if (vd == NULL) {
zio->io_error = ENXIO;
zio_interrupt(zio);
return;
}
rw_enter(&vd->vd_lock, RW_READER);
/*
* If the vdev is closed, it's likely due to a failed reopen and is
* in the UNAVAIL state. Nothing to be done here but return failure.
*/
if (vd->vd_bdev == NULL) {
rw_exit(&vd->vd_lock);
zio->io_error = ENXIO;
zio_interrupt(zio);
return;
}
switch (zio->io_type) {
case ZIO_TYPE_IOCTL:
if (!vdev_readable(v)) {
rw_exit(&vd->vd_lock);
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
}
switch (zio->io_cmd) {
case DKIOCFLUSHWRITECACHE:
if (zfs_nocacheflush)
break;
if (v->vdev_nowritecache) {
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
error = vdev_disk_io_flush(vd->vd_bdev, zio);
if (error == 0) {
rw_exit(&vd->vd_lock);
return;
}
zio->io_error = error;
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
}
rw_exit(&vd->vd_lock);
zio_execute(zio);
return;
case ZIO_TYPE_WRITE:
rw = WRITE;
break;
case ZIO_TYPE_READ:
rw = READ;
break;
case ZIO_TYPE_TRIM:
#if defined(BLKDEV_DISCARD_SECURE)
if (zio->io_trim_flags & ZIO_TRIM_SECURE)
trim_flags |= BLKDEV_DISCARD_SECURE;
#endif
zio->io_error = -blkdev_issue_discard(vd->vd_bdev,
zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS,
trim_flags);
rw_exit(&vd->vd_lock);
zio_interrupt(zio);
return;
default:
rw_exit(&vd->vd_lock);
zio->io_error = SET_ERROR(ENOTSUP);
zio_interrupt(zio);
return;
}
zio->io_target_timestamp = zio_handle_io_delay(zio);
error = __vdev_disk_physio(vd->vd_bdev, zio,
zio->io_size, zio->io_offset, rw, 0);
rw_exit(&vd->vd_lock);
if (error) {
zio->io_error = error;
zio_interrupt(zio);
return;
}
}
static void
vdev_disk_io_done(zio_t *zio)
{
/*
* If the device returned EIO, we revalidate the media. If it is
* determined the media has changed this triggers the asynchronous
* removal of the device from the configuration.
*/
if (zio->io_error == EIO) {
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
if (zfs_check_media_change(vd->vd_bdev)) {
invalidate_bdev(vd->vd_bdev);
v->vdev_remove_wanted = B_TRUE;
spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
}
}
}
static void
vdev_disk_hold(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
/* We must have a pathname, and it must be absolute. */
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
return;
/*
* Only prefetch path and devid info if the device has
* never been opened.
*/
if (vd->vdev_tsd != NULL)
return;
}
static void
vdev_disk_rele(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
/* XXX: Implement me as a vnode rele for the device */
}
vdev_ops_t vdev_disk_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_disk_open,
.vdev_op_close = vdev_disk_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_disk_io_start,
.vdev_op_io_done = vdev_disk_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = vdev_disk_hold,
.vdev_op_rele = vdev_disk_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
/*
* The zfs_vdev_scheduler module option has been deprecated. Setting this
* value no longer has any effect. It has not yet been entirely removed
* to allow the module to be loaded if this option is specified in the
* /etc/modprobe.d/zfs.conf file. The following warning will be logged.
*/
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
int error = param_set_charp(val, kp);
if (error == 0) {
printk(KERN_INFO "The 'zfs_vdev_scheduler' module option "
"is not supported.\n");
}
return (error);
}
char *zfs_vdev_scheduler = "unused";
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
int
param_set_min_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
uint64_t val;
int error;
error = kstrtoull(buf, 0, &val);
if (error < 0)
return (SET_ERROR(error));
if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
return (SET_ERROR(-EINVAL));
error = param_set_ulong(buf, kp);
if (error < 0)
return (SET_ERROR(error));
return (0);
}
int
param_set_max_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
uint64_t val;
int error;
error = kstrtoull(buf, 0, &val);
if (error < 0)
return (SET_ERROR(error));
if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
return (SET_ERROR(-EINVAL));
error = param_set_ulong(buf, kp);
if (error < 0)
return (SET_ERROR(error));
return (0);
}
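/*
* The two setters above cross-check against each other so the pair can
* never be set inconsistently: for example, a request to raise
* zfs_vdev_min_auto_ashift to 13 is rejected with EINVAL while
* zfs_vdev_max_auto_ashift is still 12, and likewise for lowering the
* maximum below the current minimum.
*/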
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
index f8bf55f75e97..1233c32deac1 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
@@ -1,2948 +1,2948 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/sid.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/fs/zfs.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/trace_acl.h>
#include <sys/zpl.h>
#define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE
#define DENY ACE_ACCESS_DENIED_ACE_TYPE
#define MAX_ACE_TYPE ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE
#define MIN_ACE_TYPE ALLOW
#define OWNING_GROUP (ACE_GROUP|ACE_IDENTIFIER_GROUP)
#define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE)
#define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \
ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \
ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \
ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE)
#define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS)
#define WRITE_MASK_ATTRS (ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| \
ACE_DELETE|ACE_DELETE_CHILD)
#define WRITE_MASK (WRITE_MASK_DATA|WRITE_MASK_ATTRS)
#define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \
ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE)
#define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER)
#define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED|\
ZFS_ACL_PROTECTED)
#define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|\
ZFS_ACL_OBJ_ACE)
#define ALL_MODE_EXECS (S_IXUSR | S_IXGRP | S_IXOTH)
#define IDMAP_WK_CREATOR_OWNER_UID 2147483648U
static uint16_t
zfs_ace_v0_get_type(void *acep)
{
return (((zfs_oldace_t *)acep)->z_type);
}
static uint16_t
zfs_ace_v0_get_flags(void *acep)
{
return (((zfs_oldace_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_v0_get_mask(void *acep)
{
return (((zfs_oldace_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_v0_get_who(void *acep)
{
return (((zfs_oldace_t *)acep)->z_fuid);
}
static void
zfs_ace_v0_set_type(void *acep, uint16_t type)
{
((zfs_oldace_t *)acep)->z_type = type;
}
static void
zfs_ace_v0_set_flags(void *acep, uint16_t flags)
{
((zfs_oldace_t *)acep)->z_flags = flags;
}
static void
zfs_ace_v0_set_mask(void *acep, uint32_t mask)
{
((zfs_oldace_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_v0_set_who(void *acep, uint64_t who)
{
((zfs_oldace_t *)acep)->z_fuid = who;
}
/*ARGSUSED*/
static size_t
zfs_ace_v0_size(void *acep)
{
return (sizeof (zfs_oldace_t));
}
static size_t
zfs_ace_v0_abstract_size(void)
{
return (sizeof (zfs_oldace_t));
}
static int
zfs_ace_v0_mask_off(void)
{
return (offsetof(zfs_oldace_t, z_access_mask));
}
/*ARGSUSED*/
static int
zfs_ace_v0_data(void *acep, void **datap)
{
*datap = NULL;
return (0);
}
static acl_ops_t zfs_acl_v0_ops = {
.ace_mask_get = zfs_ace_v0_get_mask,
.ace_mask_set = zfs_ace_v0_set_mask,
.ace_flags_get = zfs_ace_v0_get_flags,
.ace_flags_set = zfs_ace_v0_set_flags,
.ace_type_get = zfs_ace_v0_get_type,
.ace_type_set = zfs_ace_v0_set_type,
.ace_who_get = zfs_ace_v0_get_who,
.ace_who_set = zfs_ace_v0_set_who,
.ace_size = zfs_ace_v0_size,
.ace_abstract_size = zfs_ace_v0_abstract_size,
.ace_mask_off = zfs_ace_v0_mask_off,
.ace_data = zfs_ace_v0_data
};
static uint16_t
zfs_ace_fuid_get_type(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_type);
}
static uint16_t
zfs_ace_fuid_get_flags(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_fuid_get_mask(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_fuid_get_who(void *args)
{
uint16_t entry_type;
zfs_ace_t *acep = args;
entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (-1);
return (((zfs_ace_t *)acep)->z_fuid);
}
static void
zfs_ace_fuid_set_type(void *acep, uint16_t type)
{
((zfs_ace_hdr_t *)acep)->z_type = type;
}
static void
zfs_ace_fuid_set_flags(void *acep, uint16_t flags)
{
((zfs_ace_hdr_t *)acep)->z_flags = flags;
}
static void
zfs_ace_fuid_set_mask(void *acep, uint32_t mask)
{
((zfs_ace_hdr_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_fuid_set_who(void *arg, uint64_t who)
{
zfs_ace_t *acep = arg;
uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return;
acep->z_fuid = who;
}
static size_t
zfs_ace_fuid_size(void *acep)
{
zfs_ace_hdr_t *zacep = acep;
uint16_t entry_type;
switch (zacep->z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
return (sizeof (zfs_object_ace_t));
case ALLOW:
case DENY:
entry_type =
(((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS);
if (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (sizeof (zfs_ace_hdr_t));
- /*FALLTHROUGH*/
+ /* FALLTHROUGH */
default:
return (sizeof (zfs_ace_t));
}
}
static size_t
zfs_ace_fuid_abstract_size(void)
{
return (sizeof (zfs_ace_hdr_t));
}
static int
zfs_ace_fuid_mask_off(void)
{
return (offsetof(zfs_ace_hdr_t, z_access_mask));
}
static int
zfs_ace_fuid_data(void *acep, void **datap)
{
zfs_ace_t *zacep = acep;
zfs_object_ace_t *zobjp;
switch (zacep->z_hdr.z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjp = acep;
*datap = (caddr_t)zobjp + sizeof (zfs_ace_t);
return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t));
default:
*datap = NULL;
return (0);
}
}
static acl_ops_t zfs_acl_fuid_ops = {
.ace_mask_get = zfs_ace_fuid_get_mask,
.ace_mask_set = zfs_ace_fuid_set_mask,
.ace_flags_get = zfs_ace_fuid_get_flags,
.ace_flags_set = zfs_ace_fuid_set_flags,
.ace_type_get = zfs_ace_fuid_get_type,
.ace_type_set = zfs_ace_fuid_set_type,
.ace_who_get = zfs_ace_fuid_get_who,
.ace_who_set = zfs_ace_fuid_set_who,
.ace_size = zfs_ace_fuid_size,
.ace_abstract_size = zfs_ace_fuid_abstract_size,
.ace_mask_off = zfs_ace_fuid_mask_off,
.ace_data = zfs_ace_fuid_data
};
/*
* The following three functions are provided for compatibility with
* older ZPL versions in order to determine if the file used to have
* an external ACL and what version of ACL previously existed on the
* file. Would really be nice to not need this, sigh.
*/
uint64_t
zfs_external_acl(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
int error;
if (zp->z_is_sa)
return (0);
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
* z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_extern_obj);
else {
/*
* After upgrade the SA_ZPL_ZNODE_ACL should have been
* removed.
*/
VERIFY(zp->z_is_sa && error == ENOENT);
return (0);
}
}
/*
* Determine size of ACL in bytes
*
* This is more complicated than it should be since we have to deal
* with old external ACLs.
*/
static int
zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount,
zfs_acl_phys_t *aclphys)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t acl_count;
int size;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
&size)) != 0)
return (error);
*aclsize = size;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs),
&acl_count, sizeof (acl_count))) != 0)
return (error);
*aclcount = acl_count;
} else {
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
aclphys, sizeof (*aclphys))) != 0)
return (error);
if (aclphys->z_acl_version == ZFS_ACL_VERSION_INITIAL) {
*aclsize = ZFS_ACL_SIZE(aclphys->z_acl_size);
*aclcount = aclphys->z_acl_size;
} else {
*aclsize = aclphys->z_acl_size;
*aclcount = aclphys->z_acl_count;
}
}
return (0);
}
int
zfs_znode_acl_version(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
if (zp->z_is_sa)
return (ZFS_ACL_VERSION_FUID);
else {
int error;
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
* z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_version);
else {
/*
* After upgrade SA_ZPL_ZNODE_ACL should have
* been removed.
*/
VERIFY(zp->z_is_sa && error == ENOENT);
return (ZFS_ACL_VERSION_FUID);
}
}
}
static int
zfs_acl_version(int version)
{
if (version < ZPL_VERSION_FUID)
return (ZFS_ACL_VERSION_INITIAL);
else
return (ZFS_ACL_VERSION_FUID);
}
static int
zfs_acl_version_zp(znode_t *zp)
{
return (zfs_acl_version(ZTOZSB(zp)->z_version));
}
zfs_acl_t *
zfs_acl_alloc(int vers)
{
zfs_acl_t *aclp;
aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP);
list_create(&aclp->z_acl, sizeof (zfs_acl_node_t),
offsetof(zfs_acl_node_t, z_next));
aclp->z_version = vers;
if (vers == ZFS_ACL_VERSION_FUID)
aclp->z_ops = &zfs_acl_fuid_ops;
else
aclp->z_ops = &zfs_acl_v0_ops;
return (aclp);
}
zfs_acl_node_t *
zfs_acl_node_alloc(size_t bytes)
{
zfs_acl_node_t *aclnode;
aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP);
if (bytes) {
aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP);
aclnode->z_allocdata = aclnode->z_acldata;
aclnode->z_allocsize = bytes;
aclnode->z_size = bytes;
}
return (aclnode);
}
static void
zfs_acl_node_free(zfs_acl_node_t *aclnode)
{
if (aclnode->z_allocsize)
kmem_free(aclnode->z_allocdata, aclnode->z_allocsize);
kmem_free(aclnode, sizeof (zfs_acl_node_t));
}
static void
zfs_acl_release_nodes(zfs_acl_t *aclp)
{
zfs_acl_node_t *aclnode;
while ((aclnode = list_head(&aclp->z_acl))) {
list_remove(&aclp->z_acl, aclnode);
zfs_acl_node_free(aclnode);
}
aclp->z_acl_count = 0;
aclp->z_acl_bytes = 0;
}
void
zfs_acl_free(zfs_acl_t *aclp)
{
zfs_acl_release_nodes(aclp);
list_destroy(&aclp->z_acl);
kmem_free(aclp, sizeof (zfs_acl_t));
}
static boolean_t
zfs_acl_valid_ace_type(uint_t type, uint_t flags)
{
uint16_t entry_type;
switch (type) {
case ALLOW:
case DENY:
case ACE_SYSTEM_AUDIT_ACE_TYPE:
case ACE_SYSTEM_ALARM_ACE_TYPE:
entry_type = flags & ACE_TYPE_FLAGS;
return (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE || entry_type == 0 ||
entry_type == ACE_IDENTIFIER_GROUP);
default:
if (type >= MIN_ACE_TYPE && type <= MAX_ACE_TYPE)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
zfs_ace_valid(umode_t obj_mode, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
{
/*
* first check type of entry
*/
if (!zfs_acl_valid_ace_type(type, iflags))
return (B_FALSE);
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (aclp->z_version < ZFS_ACL_VERSION_FUID)
return (B_FALSE);
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
}
/*
* next check inheritance level flags
*/
if (S_ISDIR(obj_mode) &&
(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) {
if ((iflags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE)) == 0) {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void *
zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who,
uint32_t *access_mask, uint16_t *iflags, uint16_t *type)
{
zfs_acl_node_t *aclnode;
ASSERT(aclp);
if (start == NULL) {
aclnode = list_head(&aclp->z_acl);
if (aclnode == NULL)
return (NULL);
aclp->z_next_ace = aclnode->z_acldata;
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
}
aclnode = aclp->z_curr_node;
if (aclnode == NULL)
return (NULL);
if (aclnode->z_ace_idx >= aclnode->z_ace_count) {
aclnode = list_next(&aclp->z_acl, aclnode);
if (aclnode == NULL)
return (NULL);
else {
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
aclp->z_next_ace = aclnode->z_acldata;
}
}
if (aclnode->z_ace_idx < aclnode->z_ace_count) {
void *acep = aclp->z_next_ace;
size_t ace_size;
/*
* Make sure we don't overstep our bounds
*/
ace_size = aclp->z_ops->ace_size(acep);
if (((caddr_t)acep + ace_size) >
((caddr_t)aclnode->z_acldata + aclnode->z_size)) {
return (NULL);
}
*iflags = aclp->z_ops->ace_flags_get(acep);
*type = aclp->z_ops->ace_type_get(acep);
*access_mask = aclp->z_ops->ace_mask_get(acep);
*who = aclp->z_ops->ace_who_get(acep);
aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size;
aclnode->z_ace_idx++;
return ((void *)acep);
}
return (NULL);
}
/*ARGSUSED*/
static uint64_t
zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt,
uint16_t *flags, uint16_t *type, uint32_t *mask)
{
zfs_acl_t *aclp = datap;
zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)(uintptr_t)cookie;
uint64_t who;
acep = zfs_acl_next_ace(aclp, acep, &who, mask,
flags, type);
return ((uint64_t)(uintptr_t)acep);
}
/*
* Copy ACE to internal ZFS format.
* While processing the ACL each ACE will be validated for correctness.
* ACE FUIDs will be created later.
*/
static int
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
{
int i;
uint16_t entry_type;
zfs_ace_t *aceptr = z_acl;
ace_t *acep = datap;
zfs_object_ace_t *zobjacep;
ace_object_t *aceobjp;
for (i = 0; i != aclcnt; i++) {
aceptr->z_hdr.z_access_mask = acep->a_access_mask;
aceptr->z_hdr.z_flags = acep->a_flags;
aceptr->z_hdr.z_type = acep->a_type;
entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE) {
aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
cr, (entry_type == 0) ?
ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
}
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_mode, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
switch (acep->a_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
bcopy(aceobjp->a_obj_type, zobjacep->z_object_type,
sizeof (aceobjp->a_obj_type));
bcopy(aceobjp->a_inherit_obj_type,
zobjacep->z_inherit_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
default:
acep = (ace_t *)((caddr_t)acep + sizeof (ace_t));
}
aceptr = (zfs_ace_t *)((caddr_t)aceptr +
aclp->z_ops->ace_size(aceptr));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* Copy ZFS ACEs to fixed size ace_t layout
*/
static void
zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
void *datap, int filter)
{
uint64_t who;
uint32_t access_mask;
uint16_t iflags, type;
zfs_ace_hdr_t *zacep = NULL;
ace_t *acep = datap;
ace_object_t *objacep;
zfs_object_ace_t *zobjacep;
size_t ace_size;
uint16_t entry_type;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (filter) {
continue;
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
bcopy(zobjacep->z_object_type,
objacep->a_obj_type,
sizeof (zobjacep->z_object_type));
bcopy(zobjacep->z_inherit_type,
objacep->a_inherit_obj_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
default:
ace_size = sizeof (ace_t);
break;
}
entry_type = (iflags & ACE_TYPE_FLAGS);
if ((entry_type != ACE_OWNER &&
entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE)) {
acep->a_who = zfs_fuid_map_id(zfsvfs, who,
cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
ZFS_ACE_GROUP : ZFS_ACE_USER);
} else {
acep->a_who = (uid_t)(int64_t)who;
}
acep->a_access_mask = access_mask;
acep->a_flags = iflags;
acep->a_type = type;
acep = (ace_t *)((caddr_t)acep + ace_size);
}
}
static int
zfs_copy_ace_2_oldace(umode_t obj_mode, zfs_acl_t *aclp, ace_t *acep,
zfs_oldace_t *z_acl, int aclcnt, size_t *size)
{
int i;
zfs_oldace_t *aceptr = z_acl;
for (i = 0; i != aclcnt; i++, aceptr++) {
aceptr->z_access_mask = acep[i].a_access_mask;
aceptr->z_type = acep[i].a_type;
aceptr->z_flags = acep[i].a_flags;
aceptr->z_fuid = acep[i].a_who;
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_mode, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* convert old ACL format to new
*/
void
zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
{
zfs_oldace_t *oldaclp;
int i;
uint16_t type, iflags;
uint32_t access_mask;
uint64_t who;
void *cookie = NULL;
zfs_acl_node_t *newaclnode;
ASSERT(aclp->z_version == ZFS_ACL_VERSION_INITIAL);
/*
* First create the ACE in a contiguous piece of memory
* for zfs_copy_ace_2_fuid().
*
* We only convert an ACL once, so this won't happen
* every time.
*/
oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
KM_SLEEP);
i = 0;
while ((cookie = zfs_acl_next_ace(aclp, cookie, &who,
&access_mask, &iflags, &type))) {
oldaclp[i].z_flags = iflags;
oldaclp[i].z_type = type;
oldaclp[i].z_fuid = who;
oldaclp[i++].z_access_mask = access_mask;
}
newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
sizeof (zfs_object_ace_t));
aclp->z_ops = &zfs_acl_fuid_ops;
VERIFY(zfs_copy_ace_2_fuid(ZTOZSB(zp), ZTOI(zp)->i_mode,
aclp, oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
&newaclnode->z_size, NULL, cr) == 0);
newaclnode->z_ace_count = aclp->z_acl_count;
aclp->z_version = ZFS_ACL_VERSION;
kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t));
/*
* Release all previous ACL nodes
*/
zfs_acl_release_nodes(aclp);
list_insert_head(&aclp->z_acl, newaclnode);
aclp->z_acl_bytes = newaclnode->z_size;
aclp->z_acl_count = newaclnode->z_ace_count;
}
/*
* Convert unix access mask to v4 access mask
*/
static uint32_t
zfs_unix_to_v4(uint32_t access_mask)
{
uint32_t new_mask = 0;
if (access_mask & S_IXOTH)
new_mask |= ACE_EXECUTE;
if (access_mask & S_IWOTH)
new_mask |= ACE_WRITE_DATA;
if (access_mask & S_IROTH)
new_mask |= ACE_READ_DATA;
return (new_mask);
}
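/*
* For example, an "other" class of r-x (S_IROTH|S_IXOTH) maps to
* ACE_READ_DATA|ACE_EXECUTE, while a write bit maps to ACE_WRITE_DATA.
*/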
static void
zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask,
uint16_t access_type, uint64_t fuid, uint16_t entry_type)
{
uint16_t type = entry_type & ACE_TYPE_FLAGS;
aclp->z_ops->ace_mask_set(acep, access_mask);
aclp->z_ops->ace_type_set(acep, access_type);
aclp->z_ops->ace_flags_set(acep, entry_type);
if ((type != ACE_OWNER && type != OWNING_GROUP &&
type != ACE_EVERYONE))
aclp->z_ops->ace_who_set(acep, fuid);
}
/*
* Determine mode of file based on ACL.
*/
uint64_t
zfs_mode_compute(uint64_t fmode, zfs_acl_t *aclp,
uint64_t *pflags, uint64_t fuid, uint64_t fgid)
{
int entry_type;
mode_t mode;
mode_t seen = 0;
zfs_ace_hdr_t *acep = NULL;
uint64_t who;
uint16_t iflags, type;
uint32_t access_mask;
boolean_t an_exec_denied = B_FALSE;
mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX));
while ((acep = zfs_acl_next_ace(aclp, acep, &who,
&access_mask, &iflags, &type))) {
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* Skip over any inherit_only ACEs
*/
if (iflags & ACE_INHERIT_ONLY_ACE)
continue;
if (entry_type == ACE_OWNER || (entry_type == 0 &&
who == fuid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRUSR))) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWUSR))) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXUSR))) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
} else if (entry_type == OWNING_GROUP ||
(entry_type == ACE_IDENTIFIER_GROUP && who == fgid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRGRP))) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWGRP))) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXGRP))) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
} else if (entry_type == ACE_EVERYONE) {
if ((access_mask & ACE_READ_DATA)) {
if (!(seen & S_IRUSR)) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if (!(seen & S_IRGRP)) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if (!(seen & S_IROTH)) {
seen |= S_IROTH;
if (type == ALLOW) {
mode |= S_IROTH;
}
}
}
if ((access_mask & ACE_WRITE_DATA)) {
if (!(seen & S_IWUSR)) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if (!(seen & S_IWGRP)) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if (!(seen & S_IWOTH)) {
seen |= S_IWOTH;
if (type == ALLOW) {
mode |= S_IWOTH;
}
}
}
if ((access_mask & ACE_EXECUTE)) {
if (!(seen & S_IXUSR)) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
if (!(seen & S_IXGRP)) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
if (!(seen & S_IXOTH)) {
seen |= S_IXOTH;
if (type == ALLOW) {
mode |= S_IXOTH;
}
}
}
} else {
/*
* Only care if this IDENTIFIER_GROUP or
* USER ACE denies execute access to someone,
* mode is not affected
*/
if ((access_mask & ACE_EXECUTE) && type == DENY)
an_exec_denied = B_TRUE;
}
}
/*
* Failure to allow is effectively a deny, so execute permission
* is denied if it was never mentioned or if we explicitly
* weren't allowed it.
*/
if (!an_exec_denied &&
((seen & ALL_MODE_EXECS) != ALL_MODE_EXECS ||
(mode & ALL_MODE_EXECS) != ALL_MODE_EXECS))
an_exec_denied = B_TRUE;
if (an_exec_denied)
*pflags &= ~ZFS_NO_EXECS_DENIED;
else
*pflags |= ZFS_NO_EXECS_DENIED;
return (mode);
}
/*
* Read an external acl object. If the intent is to modify, always
* create a new acl and leave any cached acl in place.
*/
int
zfs_acl_node_read(struct znode *zp, boolean_t have_lock, zfs_acl_t **aclpp,
boolean_t will_modify)
{
zfs_acl_t *aclp;
int aclsize = 0;
int acl_count = 0;
zfs_acl_node_t *aclnode;
zfs_acl_phys_t znode_acl;
int version;
int error;
boolean_t drop_lock = B_FALSE;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_acl_cached && !will_modify) {
*aclpp = zp->z_acl_cached;
return (0);
}
/*
* Close the race where the znode could be upgraded while trying to
* read the znode attributes.
*
* But this could only happen if the file isn't already an SA
* znode
*/
if (!zp->z_is_sa && !have_lock) {
mutex_enter(&zp->z_lock);
drop_lock = B_TRUE;
}
version = zfs_znode_acl_version(zp);
if ((error = zfs_acl_znode_info(zp, &aclsize,
&acl_count, &znode_acl)) != 0) {
goto done;
}
aclp = zfs_acl_alloc(version);
aclp->z_acl_count = acl_count;
aclp->z_acl_bytes = aclsize;
aclnode = zfs_acl_node_alloc(aclsize);
aclnode->z_ace_count = aclp->z_acl_count;
aclnode->z_size = aclsize;
if (!zp->z_is_sa) {
if (znode_acl.z_acl_extern_obj) {
error = dmu_read(ZTOZSB(zp)->z_os,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
bcopy(znode_acl.z_ace_data, aclnode->z_acldata,
aclnode->z_size);
}
} else {
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(ZTOZSB(zp)),
aclnode->z_acldata, aclnode->z_size);
}
if (error != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
goto done;
}
list_insert_head(&aclp->z_acl, aclnode);
*aclpp = aclp;
if (!will_modify)
zp->z_acl_cached = aclp;
done:
if (drop_lock)
mutex_exit(&zp->z_lock);
return (error);
}
/*ARGSUSED*/
void
zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
boolean_t start, void *userdata)
{
zfs_acl_locator_cb_t *cb = (zfs_acl_locator_cb_t *)userdata;
if (start) {
cb->cb_acl_node = list_head(&cb->cb_aclp->z_acl);
} else {
cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
cb->cb_acl_node);
}
*dataptr = cb->cb_acl_node->z_acldata;
*length = cb->cb_acl_node->z_size;
}
int
zfs_acl_chown_setattr(znode_t *zp)
{
int error;
zfs_acl_t *aclp;
if (ZTOZSB(zp)->z_acl_type == ZFS_ACLTYPE_POSIX)
return (0);
ASSERT(MUTEX_HELD(&zp->z_lock));
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE);
if (error == 0 && aclp->z_acl_count > 0)
zp->z_mode = ZTOI(zp)->i_mode =
zfs_mode_compute(zp->z_mode, aclp,
&zp->z_pflags, KUID_TO_SUID(ZTOI(zp)->i_uid),
KGID_TO_SGID(ZTOI(zp)->i_gid));
/*
* Some ZFS implementations (ZEVO) create neither a ZNODE_ACL
* nor a DACL_ACES SA in which case ENOENT is returned from
* zfs_acl_node_read() when the SA can't be located.
* Allow chown/chgrp to succeed in these cases rather than
* returning an error that makes no sense in the context of
* the caller.
*/
if (error == ENOENT)
return (0);
return (error);
}
typedef struct trivial_acl {
uint32_t allow0; /* allow mask for bits only in owner */
uint32_t deny1; /* deny mask for bits not in owner */
uint32_t deny2; /* deny mask for bits not in group */
uint32_t owner; /* allow mask matching mode */
uint32_t group; /* allow mask matching mode */
uint32_t everyone; /* allow mask matching mode */
} trivial_acl_t;
static void
acl_trivial_access_masks(mode_t mode, boolean_t isdir, trivial_acl_t *masks)
{
uint32_t read_mask = ACE_READ_DATA;
uint32_t write_mask = ACE_WRITE_DATA|ACE_APPEND_DATA;
uint32_t execute_mask = ACE_EXECUTE;
if (isdir)
write_mask |= ACE_DELETE_CHILD;
masks->deny1 = 0;
if (!(mode & S_IRUSR) && (mode & (S_IRGRP|S_IROTH)))
masks->deny1 |= read_mask;
if (!(mode & S_IWUSR) && (mode & (S_IWGRP|S_IWOTH)))
masks->deny1 |= write_mask;
if (!(mode & S_IXUSR) && (mode & (S_IXGRP|S_IXOTH)))
masks->deny1 |= execute_mask;
masks->deny2 = 0;
if (!(mode & S_IRGRP) && (mode & S_IROTH))
masks->deny2 |= read_mask;
if (!(mode & S_IWGRP) && (mode & S_IWOTH))
masks->deny2 |= write_mask;
if (!(mode & S_IXGRP) && (mode & S_IXOTH))
masks->deny2 |= execute_mask;
masks->allow0 = 0;
if ((mode & S_IRUSR) && (!(mode & S_IRGRP) && (mode & S_IROTH)))
masks->allow0 |= read_mask;
if ((mode & S_IWUSR) && (!(mode & S_IWGRP) && (mode & S_IWOTH)))
masks->allow0 |= write_mask;
if ((mode & S_IXUSR) && (!(mode & S_IXGRP) && (mode & S_IXOTH)))
masks->allow0 |= execute_mask;
masks->owner = ACE_WRITE_ATTRIBUTES|ACE_WRITE_OWNER|ACE_WRITE_ACL|
ACE_WRITE_NAMED_ATTRS|ACE_READ_ACL|ACE_READ_ATTRIBUTES|
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE;
if (mode & S_IRUSR)
masks->owner |= read_mask;
if (mode & S_IWUSR)
masks->owner |= write_mask;
if (mode & S_IXUSR)
masks->owner |= execute_mask;
masks->group = ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_NAMED_ATTRS|
ACE_SYNCHRONIZE;
if (mode & S_IRGRP)
masks->group |= read_mask;
if (mode & S_IWGRP)
masks->group |= write_mask;
if (mode & S_IXGRP)
masks->group |= execute_mask;
masks->everyone = ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_NAMED_ATTRS|
ACE_SYNCHRONIZE;
if (mode & S_IROTH)
masks->everyone |= read_mask;
if (mode & S_IWOTH)
masks->everyone |= write_mask;
if (mode & S_IXOTH)
masks->everyone |= execute_mask;
}
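/*
* Worked example (illustrative): for a regular file with mode 0644 the
* conditional masks above all collapse to zero (allow0 = deny1 =
* deny2 = 0), so the resulting trivial ACL is just the three
* unconditional allow entries built by zfs_acl_chmod(): owner@ gets
* read_mask|write_mask plus the owner-only attribute bits, while
* group@ and everyone@ get read_mask plus the common read-attribute
* bits. Deny entries appear only when a less privileged class holds a
* bit the more privileged class lacks; e.g. mode 0604 yields
* deny2 = read_mask for group@.
*/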
/*
* ace_trivial:
* determine whether an ace_t acl is trivial
*
* Trivialness implies that the acl is composed of only
* owner, group, and everyone entries. The ACL can't
* have read_acl denied, and write_owner/write_acl/write_attributes
* can only appear in the owner@ entry.
*/
static int
ace_trivial_common(void *acep, int aclcnt,
uint64_t (*walk)(void *, uint64_t, int aclcnt,
uint16_t *, uint16_t *, uint32_t *))
{
uint16_t flags;
uint32_t mask;
uint16_t type;
uint64_t cookie = 0;
while ((cookie = walk(acep, cookie, aclcnt, &flags, &type, &mask))) {
switch (flags & ACE_TYPE_FLAGS) {
case ACE_OWNER:
case ACE_GROUP|ACE_IDENTIFIER_GROUP:
case ACE_EVERYONE:
break;
default:
return (1);
}
if (flags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE|ACE_NO_PROPAGATE_INHERIT_ACE|
ACE_INHERIT_ONLY_ACE))
return (1);
/*
* Special check for some special bits
*
* Don't allow anybody to deny reading basic
* attributes or a file's ACL.
*/
if ((mask & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
(type == ACE_ACCESS_DENIED_ACE_TYPE))
return (1);
/*
* Delete permission is never set by default
*/
if (mask & ACE_DELETE)
return (1);
/*
* Child delete permission should be accompanied by write
*/
if ((mask & ACE_DELETE_CHILD) && !(mask & ACE_WRITE_DATA))
return (1);
/*
* Only allow owner@ to have
* write_acl/write_owner/write_attributes/write_xattr.
*/
if (type == ACE_ACCESS_ALLOWED_ACE_TYPE &&
(!(flags & ACE_OWNER) && (mask &
(ACE_WRITE_OWNER|ACE_WRITE_ACL| ACE_WRITE_ATTRIBUTES|
ACE_WRITE_NAMED_ATTRS))))
return (1);
}
return (0);
}
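/*
* For example, an ACE naming a specific user or group, or one carrying
* any inheritance flag, makes the ACL non-trivial here, and
* zfs_aclset_common() will then leave ZFS_ACL_TRIVIAL clear in the
* znode's pflags.
*/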
/*
* common code for setting ACLs.
*
* This function is called from zfs_mode_update, zfs_perm_init, and zfs_setacl.
* zfs_setacl passes a non-NULL inherit pointer (ihp) to indicate that it's
* already checked the acl and knows whether to inherit.
*/
int
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
int error;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_type_t otype;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t mode;
sa_bulk_attr_t bulk[5];
uint64_t ctime[2];
int count = 0;
zfs_acl_phys_t acl_phys;
mode = zp->z_mode;
mode = zfs_mode_compute(mode, aclp, &zp->z_pflags,
KUID_TO_SUID(ZTOI(zp)->i_uid), KGID_TO_SGID(ZTOI(zp)->i_gid));
zp->z_mode = ZTOI(zp)->i_mode = mode;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
/*
* Upgrade needed?
*/
if (!zfsvfs->z_use_fuids) {
otype = DMU_OT_OLDACL;
} else {
if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
(zfsvfs->z_version >= ZPL_VERSION_FUID))
zfs_acl_xform(zp, aclp, cr);
ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID);
otype = DMU_OT_ACL;
}
/*
* Arrgh, we have to handle the old on-disk format
* as well as the newer (preferred) SA format.
*/
if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */
locate.cb_aclp = aclp;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, aclp->z_acl_bytes);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs),
NULL, &aclp->z_acl_count, sizeof (uint64_t));
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
uint64_t off = 0;
uint64_t aoid;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
&acl_phys, sizeof (acl_phys))) != 0)
return (error);
aoid = acl_phys.z_acl_extern_obj;
if (aclp->z_acl_bytes > ZFS_ACE_SPACE) {
/*
* If ACL was previously external and we are now
* converting to new ACL format then release old
* ACL object and create a new one.
*/
if (aoid &&
aclp->z_version != acl_phys.z_acl_version) {
error = dmu_object_free(zfsvfs->z_os, aoid, tx);
if (error)
return (error);
aoid = 0;
}
if (aoid == 0) {
aoid = dmu_object_alloc(zfsvfs->z_os,
otype, aclp->z_acl_bytes,
otype == DMU_OT_ACL ?
DMU_OT_SYSACL : DMU_OT_NONE,
otype == DMU_OT_ACL ?
DN_OLD_MAX_BONUSLEN : 0, tx);
} else {
(void) dmu_object_set_blocksize(zfsvfs->z_os,
aoid, aclp->z_acl_bytes, 0, tx);
}
acl_phys.z_acl_extern_obj = aoid;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
dmu_write(zfsvfs->z_os, aoid, off,
aclnode->z_size, aclnode->z_acldata, tx);
off += aclnode->z_size;
}
} else {
void *start = acl_phys.z_ace_data;
/*
* Migrating back embedded?
*/
if (acl_phys.z_acl_extern_obj) {
error = dmu_object_free(zfsvfs->z_os,
acl_phys.z_acl_extern_obj, tx);
if (error)
return (error);
acl_phys.z_acl_extern_obj = 0;
}
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
bcopy(aclnode->z_acldata, start,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
}
/*
* If Old version then swap count/bytes to match old
* layout of znode_acl_phys_t.
*/
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
acl_phys.z_acl_size = aclp->z_acl_count;
acl_phys.z_acl_count = aclp->z_acl_bytes;
} else {
acl_phys.z_acl_size = aclp->z_acl_bytes;
acl_phys.z_acl_count = aclp->z_acl_count;
}
acl_phys.z_acl_version = aclp->z_version;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (acl_phys));
}
/*
* Replace ACL wide bits, but first clear them.
*/
zp->z_pflags &= ~ZFS_ACL_WIDE_FLAGS;
zp->z_pflags |= aclp->z_hints;
if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0)
zp->z_pflags |= ZFS_ACL_TRIVIAL;
zfs_tstamp_update_setup(zp, STATE_CHANGED, NULL, ctime);
return (sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
}
static void
zfs_acl_chmod(boolean_t isdir, uint64_t mode, boolean_t split, boolean_t trim,
zfs_acl_t *aclp)
{
void *acep = NULL;
uint64_t who;
int new_count, new_bytes;
int ace_size;
int entry_type;
uint16_t iflags, type;
uint32_t access_mask;
zfs_acl_node_t *newnode;
size_t abstract_size = aclp->z_ops->ace_abstract_size();
void *zacep;
trivial_acl_t masks;
new_count = new_bytes = 0;
acl_trivial_access_masks((mode_t)mode, isdir, &masks);
newnode = zfs_acl_node_alloc((abstract_size * 6) + aclp->z_acl_bytes);
zacep = newnode->z_acldata;
if (masks.allow0) {
zfs_set_ace(aclp, zacep, masks.allow0, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny1) {
zfs_set_ace(aclp, zacep, masks.deny1, DENY, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny2) {
zfs_set_ace(aclp, zacep, masks.deny2, DENY, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* ACEs used to represent the file mode may be divided
* into an equivalent pair of inherit-only and regular
* ACEs, if they are inheritable.
* Skip regular ACEs, which are replaced by the new mode.
*/
if (split && (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)) {
if (!isdir || !(iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
continue;
/*
* We preserve owner@, group@, or everyone@
* permissions, if they are inheritable, by
* copying them to inherit_only ACEs. This
* prevents inheritable permissions from being
* altered along with the file mode.
*/
iflags |= ACE_INHERIT_ONLY_ACE;
}
/*
* If this ACL has any inheritable ACEs, mark that in
* the hints (which are later masked into the pflags)
* so create knows to do inheritance.
*/
if (isdir && (iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if ((type != ALLOW && type != DENY) ||
(iflags & ACE_INHERIT_ONLY_ACE)) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
break;
}
} else {
/*
* Limit permissions to be no greater than
* group permissions.
* The "aclinherit" and "aclmode" properties
* affect policy for create and chmod(2),
* respectively.
*/
if ((type == ALLOW) && trim)
access_mask &= masks.group;
}
zfs_set_ace(aclp, zacep, access_mask, type, who, iflags);
ace_size = aclp->z_ops->ace_size(acep);
zacep = (void *)((uintptr_t)zacep + ace_size);
new_count++;
new_bytes += ace_size;
}
zfs_set_ace(aclp, zacep, masks.owner, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.group, ALLOW, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.everyone, ALLOW, -1, ACE_EVERYONE);
new_count += 3;
new_bytes += abstract_size * 3;
zfs_acl_release_nodes(aclp);
aclp->z_acl_count = new_count;
aclp->z_acl_bytes = new_bytes;
newnode->z_ace_count = new_count;
newnode->z_size = new_bytes;
list_insert_tail(&aclp->z_acl, newnode);
}
int
zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
{
int error = 0;
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
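/*
 * With aclmode=discard the existing ACL is replaced outright, so start
 * from an empty ACL; otherwise read the current ACL and chmod it in place.
 */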
if (ZTOZSB(zp)->z_acl_mode == ZFS_ACL_DISCARD)
*aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
else
error = zfs_acl_node_read(zp, B_TRUE, aclp, B_TRUE);
if (error == 0) {
(*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
zfs_acl_chmod(S_ISDIR(ZTOI(zp)->i_mode), mode, B_TRUE,
(ZTOZSB(zp)->z_acl_mode == ZFS_ACL_GROUPMASK), *aclp);
}
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Should ACE be inherited?
*/
static int
zfs_ace_can_use(umode_t obj_mode, uint16_t acep_flags)
{
int iflags = (acep_flags & 0xf);
if (S_ISDIR(obj_mode) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
return (1);
else if (iflags & ACE_FILE_INHERIT_ACE)
return (!(S_ISDIR(obj_mode) &&
(iflags & ACE_NO_PROPAGATE_INHERIT_ACE)));
return (0);
}
/*
* Inherit inheritable ACEs from the parent directory.
*/
static zfs_acl_t *
zfs_acl_inherit(zfsvfs_t *zfsvfs, umode_t va_mode, zfs_acl_t *paclp,
uint64_t mode, boolean_t *need_chmod)
{
void *pacep = NULL;
void *acep;
zfs_acl_node_t *aclnode;
zfs_acl_t *aclp = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t iflags, newflags, type;
size_t ace_size;
void *data1, *data2;
size_t data1sz, data2sz;
uint_t aclinherit;
boolean_t isdir = S_ISDIR(va_mode);
boolean_t isreg = S_ISREG(va_mode);
*need_chmod = B_TRUE;
aclp = zfs_acl_alloc(paclp->z_version);
aclinherit = zfsvfs->z_acl_inherit;
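/*
 * With aclinherit=discard, or for symlinks, nothing is inherited;
 * return the empty ACL.
 */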
if (aclinherit == ZFS_ACL_DISCARD || S_ISLNK(va_mode))
return (aclp);
while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
&access_mask, &iflags, &type))) {
/*
* don't inherit bogus ACEs
*/
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
/*
* Check if ACE is inheritable by this vnode
*/
if ((aclinherit == ZFS_ACL_NOALLOW && type == ALLOW) ||
!zfs_ace_can_use(va_mode, iflags))
continue;
/*
* If an owner@, group@, or everyone@ ACE is inheritable,
* then zfs_acl_chmod() isn't needed.
*/
if ((aclinherit == ZFS_ACL_PASSTHROUGH ||
aclinherit == ZFS_ACL_PASSTHROUGH_X) &&
((iflags & (ACE_OWNER|ACE_EVERYONE)) ||
((iflags & OWNING_GROUP) == OWNING_GROUP)) &&
(isreg || (isdir && (iflags & ACE_DIRECTORY_INHERIT_ACE))))
*need_chmod = B_FALSE;
/*
* Strip inherited execute permission from file if
* not in mode
*/
if (aclinherit == ZFS_ACL_PASSTHROUGH_X && type == ALLOW &&
!isdir && ((mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)) {
access_mask &= ~ACE_EXECUTE;
}
/*
* Strip write_acl and write_owner from permissions
* when inheriting an ACE
*/
if (aclinherit == ZFS_ACL_RESTRICTED && type == ALLOW) {
access_mask &= ~RESTRICTED_CLEAR;
}
ace_size = aclp->z_ops->ace_size(pacep);
aclnode = zfs_acl_node_alloc(ace_size);
list_insert_tail(&aclp->z_acl, aclnode);
acep = aclnode->z_acldata;
zfs_set_ace(aclp, acep, access_mask, type,
who, iflags|ACE_INHERITED_ACE);
/*
* Copy special opaque data if any
*/
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
VERIFY((data2sz = aclp->z_ops->ace_data(acep,
&data2)) == data1sz);
bcopy(data1, data2, data2sz);
}
aclp->z_acl_count++;
aclnode->z_ace_count++;
aclp->z_acl_bytes += aclnode->z_size;
newflags = aclp->z_ops->ace_flags_get(acep);
/*
* If ACE is not to be inherited further, or if the vnode is
* not a directory, remove all inheritance flags
*/
if (!isdir || (iflags & ACE_NO_PROPAGATE_INHERIT_ACE)) {
newflags &= ~ALL_INHERIT;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
continue;
}
/*
* This directory has an inheritable ACE
*/
aclp->z_hints |= ZFS_INHERIT_ACE;
/*
* If only FILE_INHERIT is set then turn on
* inherit_only
*/
if ((iflags & (ACE_FILE_INHERIT_ACE |
ACE_DIRECTORY_INHERIT_ACE)) == ACE_FILE_INHERIT_ACE) {
newflags |= ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
} else {
newflags &= ~ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
}
}
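/*
 * With aclmode=restricted, an inherited ACL is left as-is rather than
 * being chmod'ed to match the requested mode.
 */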
if (zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
aclp->z_acl_count != 0) {
*need_chmod = B_FALSE;
}
return (aclp);
}
/*
* Create the initial permissions for a file system object,
* including inheritable ACEs.
* Also, create FUIDs for owner and group.
*/
int
zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids)
{
int error;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zfs_acl_t *paclp;
gid_t gid = vap->va_gid;
boolean_t need_chmod = B_TRUE;
boolean_t trim = B_FALSE;
boolean_t inherited = B_FALSE;
bzero(acl_ids, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = vap->va_mode;
if (vsecp)
if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_mode, vsecp,
cr, &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
return (error);
acl_ids->z_fuid = vap->va_uid;
acl_ids->z_fgid = vap->va_gid;
#ifdef HAVE_KSID
/*
* Determine uid and gid.
*/
if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
((flag & IS_XATTR) && (S_ISDIR(vap->va_mode)))) {
acl_ids->z_fuid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_uid,
cr, ZFS_OWNER, &acl_ids->z_fuidp);
acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
cr, &acl_ids->z_fuidp);
acl_ids->z_fgid = 0;
if (vap->va_mask & AT_GID) {
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
if (acl_ids->z_fgid != KGID_TO_SGID(ZTOI(dzp)->i_gid) &&
!groupmember(vap->va_gid, cr) &&
secpolicy_vnode_create_gid(cr) != 0)
acl_ids->z_fgid = 0;
}
if (acl_ids->z_fgid == 0) {
if (dzp->z_mode & S_ISGID) {
char *domain;
uint32_t rid;
acl_ids->z_fgid = KGID_TO_SGID(
ZTOI(dzp)->i_gid);
gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
cr, ZFS_GROUP);
if (zfsvfs->z_use_fuids &&
IS_EPHEMERAL(acl_ids->z_fgid)) {
domain = zfs_fuid_idx_domain(
&zfsvfs->z_fuid_idx,
FUID_INDEX(acl_ids->z_fgid));
rid = FUID_RID(acl_ids->z_fgid);
zfs_fuid_node_add(&acl_ids->z_fuidp,
domain, rid,
FUID_INDEX(acl_ids->z_fgid),
acl_ids->z_fgid, ZFS_GROUP);
}
} else {
acl_ids->z_fgid = zfs_fuid_create_cred(zfsvfs,
ZFS_GROUP, cr, &acl_ids->z_fuidp);
gid = crgetgid(cr);
}
}
}
#endif /* HAVE_KSID */
/*
* If we're creating a directory, and the parent directory has the
* set-GID bit set, set it on the new directory.
* Otherwise, if the user is neither privileged nor a member of the
* file's new group, clear the file's set-GID bit.
*/
if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) &&
(S_ISDIR(vap->va_mode))) {
acl_ids->z_mode |= S_ISGID;
} else {
if ((acl_ids->z_mode & S_ISGID) &&
secpolicy_vnode_setids_setgids(cr, gid) != 0)
acl_ids->z_mode &= ~S_ISGID;
}
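/*
 * No explicit ACL was supplied via vsecp, so build one: inherit from
 * the parent when it has inheritable ACEs, otherwise create a trivial
 * ACL from the mode.
 */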
if (acl_ids->z_aclp == NULL) {
mutex_enter(&dzp->z_acl_lock);
mutex_enter(&dzp->z_lock);
if (!(flag & IS_ROOT_NODE) &&
(dzp->z_pflags & ZFS_INHERIT_ACE) &&
!(dzp->z_pflags & ZFS_XATTR)) {
VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_mode, paclp, acl_ids->z_mode, &need_chmod);
inherited = B_TRUE;
} else {
acl_ids->z_aclp =
zfs_acl_alloc(zfs_acl_version_zp(dzp));
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
mutex_exit(&dzp->z_lock);
mutex_exit(&dzp->z_acl_lock);
if (need_chmod) {
if (S_ISDIR(vap->va_mode))
acl_ids->z_aclp->z_hints |=
ZFS_ACL_AUTO_INHERIT;
if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH_X)
trim = B_TRUE;
zfs_acl_chmod(vap->va_mode, acl_ids->z_mode, B_FALSE,
trim, acl_ids->z_aclp);
}
}
if (inherited || vsecp) {
acl_ids->z_mode = zfs_mode_compute(acl_ids->z_mode,
acl_ids->z_aclp, &acl_ids->z_aclp->z_hints,
acl_ids->z_fuid, acl_ids->z_fgid);
if (ace_trivial_common(acl_ids->z_aclp, 0, zfs_ace_walk) == 0)
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
return (0);
}
/*
* Free ACL and fuid_infop, but not the acl_ids structure
*/
void
zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
{
if (acl_ids->z_aclp)
zfs_acl_free(acl_ids->z_aclp);
if (acl_ids->z_fuidp)
zfs_fuid_info_free(acl_ids->z_fuidp);
acl_ids->z_aclp = NULL;
acl_ids->z_fuidp = NULL;
}
boolean_t
zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid)
{
return (zfs_id_overquota(zv, DMU_USERUSED_OBJECT, acl_ids->z_fuid) ||
zfs_id_overquota(zv, DMU_GROUPUSED_OBJECT, acl_ids->z_fgid) ||
(projid != ZFS_DEFAULT_PROJID && projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zv, DMU_PROJECTUSED_OBJECT, projid)));
}
/*
* Retrieve a file's ACL
*/
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfs_acl_t *aclp;
ulong_t mask;
int error;
int count = 0;
int largeace = 0;
mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr)))
return (error);
mutex_enter(&zp->z_acl_lock);
error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Scan ACL to determine number of ACEs
*/
if ((zp->z_pflags & ZFS_ACL_OBJ_ACE) && !(mask & VSA_ACE_ALLTYPES)) {
void *zacep = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t type, iflags;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
largeace++;
continue;
default:
count++;
}
}
vsecp->vsa_aclcnt = count;
} else
count = (int)aclp->z_acl_count;
if (mask & VSA_ACECNT) {
vsecp->vsa_aclcnt = count;
}
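/*
 * Copy the ACEs out to the caller, converting FUID-style entries back
 * to plain ace_t format when the ACL is at ZFS_ACL_VERSION_FUID.
 */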
if (mask & VSA_ACE) {
size_t aclsz;
aclsz = count * sizeof (ace_t) +
sizeof (ace_object_t) * largeace;
vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP);
vsecp->vsa_aclentsz = aclsz;
if (aclp->z_version == ZFS_ACL_VERSION_FUID)
zfs_copy_fuid_2_ace(ZTOZSB(zp), aclp, cr,
vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES));
else {
zfs_acl_node_t *aclnode;
void *start = vsecp->vsa_aclentp;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
bcopy(aclnode->z_acldata, start,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
ASSERT((caddr_t)start - (caddr_t)vsecp->vsa_aclentp ==
aclp->z_acl_bytes);
}
}
if (mask & VSA_ACE_ACLFLAGS) {
vsecp->vsa_aclflags = 0;
if (zp->z_pflags & ZFS_ACL_DEFAULTED)
vsecp->vsa_aclflags |= ACL_DEFAULTED;
if (zp->z_pflags & ZFS_ACL_PROTECTED)
vsecp->vsa_aclflags |= ACL_PROTECTED;
if (zp->z_pflags & ZFS_ACL_AUTO_INHERIT)
vsecp->vsa_aclflags |= ACL_AUTO_INHERIT;
}
mutex_exit(&zp->z_acl_lock);
return (0);
}
int
zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_mode,
vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
zfs_acl_t *aclp;
zfs_acl_node_t *aclnode;
int aclcnt = vsecp->vsa_aclcnt;
int error;
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (SET_ERROR(EINVAL));
aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
aclp->z_hints = 0;
aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
if ((error = zfs_copy_ace_2_oldace(obj_mode, aclp,
(ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata,
aclcnt, &aclnode->z_size)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
} else {
if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_mode, aclp,
vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
&aclnode->z_size, fuidp, cr)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
}
aclp->z_acl_bytes = aclnode->z_size;
aclnode->z_ace_count = aclcnt;
aclp->z_acl_count = aclcnt;
list_insert_head(&aclp->z_acl, aclnode);
/*
* If flags are being set then add them to z_hints
*/
if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) {
if (vsecp->vsa_aclflags & ACL_PROTECTED)
aclp->z_hints |= ZFS_ACL_PROTECTED;
if (vsecp->vsa_aclflags & ACL_DEFAULTED)
aclp->z_hints |= ZFS_ACL_DEFAULTED;
if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT)
aclp->z_hints |= ZFS_ACL_AUTO_INHERIT;
}
*zaclp = aclp;
return (0);
}
/*
* Set a file's ACL
*/
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
zfs_acl_t *aclp;
zfs_fuid_info_t *fuidp = NULL;
boolean_t fuid_dirtied;
uint64_t acl_obj;
if (mask == 0)
return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
return (SET_ERROR(EPERM));
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)))
return (error);
error = zfs_vsec_2_aclp(zfsvfs, ZTOI(zp)->i_mode, vsecp, cr, &fuidp,
&aclp);
if (error)
return (error);
/*
* If ACL wide flags aren't being set then preserve any
* existing flags.
*/
if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) {
aclp->z_hints |=
(zp->z_pflags & V4_ACL_WIDE_FLAGS);
}
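/*
 * Assign a transaction and write the new ACL. If the DMU returns
 * ERESTART, wait for the next txg and retry from the top.
 */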
top:
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
/*
* If old version and ACL won't fit in bonus and we aren't
* upgrading then take out necessary DMU holds
*/
if ((acl_obj = zfs_external_acl(zp)) != 0) {
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes);
}
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
mutex_exit(&zp->z_acl_lock);
mutex_exit(&zp->z_lock);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
zfs_acl_free(aclp);
return (error);
}
error = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT(error == 0);
ASSERT(zp->z_acl_cached == NULL);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
zfs_log_acl(zilog, tx, zp, vsecp, fuidp);
if (fuidp)
zfs_fuid_info_free(fuidp);
dmu_tx_commit(tx);
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Check accesses of interest (AoI) against attributes of the dataset
* such as read-only. Returns zero if no AoI conflict with dataset
* attributes, otherwise an appropriate errno is returned.
*/
static int
zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
{
if ((v4_mode & WRITE_MASK) && (zfs_is_readonly(ZTOZSB(zp))) &&
(!Z_ISDEV(ZTOI(zp)->i_mode) ||
(Z_ISDEV(ZTOI(zp)->i_mode) && (v4_mode & WRITE_MASK_ATTRS)))) {
return (SET_ERROR(EROFS));
}
/*
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common().
*/
if ((v4_mode & WRITE_MASK_DATA) &&
(zp->z_pflags & ZFS_IMMUTABLE)) {
return (SET_ERROR(EPERM));
}
if ((v4_mode & (ACE_DELETE | ACE_DELETE_CHILD)) &&
(zp->z_pflags & ZFS_NOUNLINK)) {
return (SET_ERROR(EPERM));
}
if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED))) {
return (SET_ERROR(EACCES));
}
return (0);
}
/*
* The primary usage of this function is to loop through all of the
* ACEs in the znode, determining what accesses of interest (AoI) to
* the caller are allowed or denied. The AoI are expressed as bits in
* the working_mode parameter. As each ACE is processed, bits covered
* by that ACE are removed from the working_mode. This removal
* facilitates two things. The first is that when the working mode is
* empty (= 0), we know we've looked at all the AoI. The second is
* that the ACE interpretation rules don't allow a later ACE to undo
* something granted or denied by an earlier ACE. Removing the
* discovered access or denial enforces this rule. At the end of
* processing the ACEs, all AoI that were found to be denied are
* placed into the working_mode, giving the caller a mask of denied
* accesses. Returns:
* 0 if all AoI granted
* EACCES if the denied mask is non-zero
* other error if abnormal failure (e.g., IO error)
*
* A secondary usage of the function is to determine if any of the
* AoI are granted. If an ACE grants any access in
* the working_mode, we immediately short circuit out of the function.
* This mode is chosen by setting anyaccess to B_TRUE. The
* working_mode is not a denied access mask upon exit if the function
* is used in this manner.
*/
static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_acl_t *aclp;
int error;
uid_t uid = crgetuid(cr);
uint64_t who;
uint16_t type, iflags;
uint16_t entry_type;
uint32_t access_mask;
uint32_t deny_mask = 0;
zfs_ace_hdr_t *acep = NULL;
boolean_t checkit;
uid_t gowner;
uid_t fowner;
zfs_fuid_map_ids(zp, cr, &fowner, &gowner);
mutex_enter(&zp->z_acl_lock);
error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
ASSERT(zp->z_acl_cached);
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
uint32_t mask_matched;
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
if (S_ISDIR(ZTOI(zp)->i_mode) &&
(iflags & ACE_INHERIT_ONLY_ACE))
continue;
/* Skip ACE if it does not affect any AoI */
mask_matched = (access_mask & *working_mode);
if (!mask_matched)
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
checkit = B_FALSE;
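/*
 * Determine whether this ACE applies to the caller: the file owner,
 * the owning group, a named group, everyone@, or an explicit user entry.
 */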
switch (entry_type) {
case ACE_OWNER:
if (uid == fowner)
checkit = B_TRUE;
break;
case OWNING_GROUP:
who = gowner;
- /*FALLTHROUGH*/
+ /* FALLTHROUGH */
case ACE_IDENTIFIER_GROUP:
checkit = zfs_groupmember(zfsvfs, who, cr);
break;
case ACE_EVERYONE:
checkit = B_TRUE;
break;
/* USER Entry */
default:
if (entry_type == 0) {
uid_t newid;
newid = zfs_fuid_map_id(zfsvfs, who, cr,
ZFS_ACE_USER);
if (newid != IDMAP_WK_CREATOR_OWNER_UID &&
uid == newid)
checkit = B_TRUE;
break;
} else {
mutex_exit(&zp->z_acl_lock);
return (SET_ERROR(EIO));
}
}
if (checkit) {
if (type == DENY) {
DTRACE_PROBE3(zfs__ace__denies,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
deny_mask |= mask_matched;
} else {
DTRACE_PROBE3(zfs__ace__allows,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
if (anyaccess) {
mutex_exit(&zp->z_acl_lock);
return (0);
}
}
*working_mode &= ~mask_matched;
}
/* Are we done? */
if (*working_mode == 0)
break;
}
mutex_exit(&zp->z_acl_lock);
/* Put the found 'denies' back on the working mode */
if (deny_mask) {
*working_mode |= deny_mask;
return (SET_ERROR(EACCES));
} else if (*working_mode) {
return (-1);
}
return (0);
}
/*
* Return true if any access whatsoever is granted; we don't actually
* care what access is granted.
*/
boolean_t
zfs_has_access(znode_t *zp, cred_t *cr)
{
uint32_t have = ACE_ALL_PERMS;
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) {
uid_t owner;
owner = zfs_fuid_map_id(ZTOZSB(zp),
KUID_TO_SUID(ZTOI(zp)->i_uid), cr, ZFS_OWNER);
return (secpolicy_vnode_any_access(cr, ZTOI(zp), owner) == 0);
}
return (B_TRUE);
}
static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int err;
*working_mode = v4_mode;
*check_privs = B_TRUE;
/*
* Short circuit empty requests
*/
if (v4_mode == 0 || zfsvfs->z_replay) {
*working_mode = 0;
return (0);
}
if ((err = zfs_zaccess_dataset_check(zp, v4_mode)) != 0) {
*check_privs = B_FALSE;
return (err);
}
/*
* The caller requested that the ACL check be skipped. This
* would only happen if the caller checked VOP_ACCESS() with a
* 32 bit ACE mask and already had the appropriate permissions.
*/
if (skipaclchk) {
*working_mode = 0;
return (0);
}
/*
* Note: ZFS_READONLY represents the "DOS R/O" attribute.
* When that flag is set, we should behave as if write access
* were not granted by anything in the ACL. In particular:
* We _must_ allow writes after opening the file r/w, then
* setting the DOS R/O attribute, and writing some more.
* (Similar to how you can write after fchmod(fd, 0444).)
*
* Therefore ZFS_READONLY is ignored in the dataset check
* above, and checked here as if part of the ACL check.
* Also note: DOS R/O is ignored for directories.
*/
if ((v4_mode & WRITE_MASK_DATA) &&
S_ISDIR(ZTOI(zp)->i_mode) &&
(zp->z_pflags & ZFS_READONLY)) {
return (SET_ERROR(EPERM));
}
return (zfs_zaccess_aces_check(zp, working_mode, B_FALSE, cr));
}
static int
zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
cred_t *cr)
{
if (*working_mode != ACE_WRITE_DATA)
return (SET_ERROR(EACCES));
return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
check_privs, B_FALSE, cr));
}
int
zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
{
boolean_t owner = B_FALSE;
boolean_t groupmbr = B_FALSE;
boolean_t is_attr;
uid_t uid = crgetuid(cr);
int error;
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
(S_ISDIR(ZTOI(zdp)->i_mode)));
if (is_attr)
goto slow;
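/*
 * Fast path: attempt to satisfy an execute-only access check from the
 * cached mode bits without reading the ACL from disk.
 */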
mutex_enter(&zdp->z_acl_lock);
if (zdp->z_pflags & ZFS_NO_EXECS_DENIED) {
mutex_exit(&zdp->z_acl_lock);
return (0);
}
if (KUID_TO_SUID(ZTOI(zdp)->i_uid) != 0 ||
KGID_TO_SGID(ZTOI(zdp)->i_gid) != 0) {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
if (uid == KUID_TO_SUID(ZTOI(zdp)->i_uid)) {
owner = B_TRUE;
if (zdp->z_mode & S_IXUSR) {
mutex_exit(&zdp->z_acl_lock);
return (0);
} else {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
}
if (groupmember(KGID_TO_SGID(ZTOI(zdp)->i_gid), cr)) {
groupmbr = B_TRUE;
if (zdp->z_mode & S_IXGRP) {
mutex_exit(&zdp->z_acl_lock);
return (0);
} else {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
}
if (!owner && !groupmbr) {
if (zdp->z_mode & S_IXOTH) {
mutex_exit(&zdp->z_acl_lock);
return (0);
}
}
mutex_exit(&zdp->z_acl_lock);
slow:
DTRACE_PROBE(zfs__fastpath__execute__access__miss);
ZFS_ENTER(ZTOZSB(zdp));
error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr);
ZFS_EXIT(ZTOZSB(zdp));
return (error);
}
/*
* Determine whether access should be granted or denied.
*
* The least priv subsystem is always consulted as a basic privilege
* can define any form of access.
*/
int
zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
{
uint32_t working_mode;
int error;
int is_attr;
boolean_t check_privs;
znode_t *xzp;
znode_t *check_zp = zp;
mode_t needed_bits;
uid_t owner;
is_attr = ((zp->z_pflags & ZFS_XATTR) && S_ISDIR(ZTOI(zp)->i_mode));
/*
* If this is an extended attribute, validate against the base file.
*/
if (is_attr) {
if ((error = zfs_zget(ZTOZSB(zp),
zp->z_xattr_parent, &xzp)) != 0) {
return (error);
}
check_zp = xzp;
/*
* fixup mode to map to xattr perms
*/
if (mode & (ACE_WRITE_DATA|ACE_APPEND_DATA)) {
mode &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA);
mode |= ACE_WRITE_NAMED_ATTRS;
}
if (mode & (ACE_READ_DATA|ACE_EXECUTE)) {
mode &= ~(ACE_READ_DATA|ACE_EXECUTE);
mode |= ACE_READ_NAMED_ATTRS;
}
}
owner = zfs_fuid_map_id(ZTOZSB(zp), KUID_TO_SUID(ZTOI(zp)->i_uid),
cr, ZFS_OWNER);
/*
* Map the bits required to the standard inode flags
* S_IRUSR|S_IWUSR|S_IXUSR in the needed_bits. Map the bits
* mapped by working_mode (currently missing) in missing_bits.
* Call secpolicy_vnode_access2() with (needed_bits & ~checkmode),
* needed_bits.
*/
needed_bits = 0;
working_mode = mode;
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
owner == crgetuid(cr))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
needed_bits |= S_IXUSR;
if ((error = zfs_zaccess_common(check_zp, mode, &working_mode,
&check_privs, skipaclchk, cr)) == 0) {
if (is_attr)
zrele(xzp);
return (secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits));
}
if (error && !check_privs) {
if (is_attr)
zrele(xzp);
return (error);
}
if (error && (flags & V_APPEND)) {
error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr);
}
if (error && check_privs) {
mode_t checkmode = 0;
/*
* First check for implicit owner permission on
* read_acl/read_attributes
*/
error = 0;
ASSERT(working_mode != 0);
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) &&
owner == crgetuid(cr)))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
checkmode |= S_IXUSR;
error = secpolicy_vnode_access2(cr, ZTOI(check_zp), owner,
needed_bits & ~checkmode, needed_bits);
if (error == 0 && (working_mode & ACE_WRITE_OWNER))
error = secpolicy_vnode_chown(cr, owner);
if (error == 0 && (working_mode & ACE_WRITE_ACL))
error = secpolicy_vnode_setdac(cr, owner);
if (error == 0 && (working_mode &
(ACE_DELETE|ACE_DELETE_CHILD)))
error = secpolicy_vnode_remove(cr);
if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) {
error = secpolicy_vnode_chown(cr, owner);
}
if (error == 0) {
/*
* See if any bits other than those already checked
* for are still present. If so then return EACCES
*/
if (working_mode & ~(ZFS_CHECKED_MASKS)) {
error = SET_ERROR(EACCES);
}
}
} else if (error == 0) {
error = secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits);
}
if (is_attr)
zrele(xzp);
return (error);
}
/*
* Translate traditional unix S_IRUSR/S_IWUSR/S_IXUSR mode into
* NFSv4-style ZFS ACL format and call zfs_zaccess()
*/
int
zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr)
{
return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr));
}
/*
* Access function for secpolicy_vnode_setattr
*/
int
zfs_zaccess_unix(znode_t *zp, mode_t mode, cred_t *cr)
{
int v4_mode = zfs_unix_to_v4(mode >> 6);
return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr));
}
/* See zfs_zaccess_delete() */
int zfs_write_implies_delete_child = 1;
/*
* Determine whether delete access should be granted.
*
* The following chart outlines how we handle delete permissions, which is
* how recent versions of Windows (Windows 2008) handle it. The efficiency
* comes from not having to check the parent ACL where the object itself grants
* delete:
*
* -------------------------------------------------------
* | Parent Dir | Target Object Permissions |
* | permissions | |
* -------------------------------------------------------
* | | ACL Allows | ACL Denies| Delete |
* | | Delete | Delete | unspecified|
* -------------------------------------------------------
* | ACL Allows | Permit | Deny * | Permit |
* | DELETE_CHILD | | | |
* -------------------------------------------------------
* | ACL Denies | Permit | Deny | Deny |
* | DELETE_CHILD | | | |
* -------------------------------------------------------
* | ACL specifies | | | |
* | only allow | Permit | Deny * | Permit |
* | write and | | | |
* | execute | | | |
* -------------------------------------------------------
* | ACL denies | | | |
* | write and | Permit | Deny | Deny |
* | execute | | | |
* -------------------------------------------------------
* ^
* |
* Re. execute permission on the directory: if that's missing,
* the vnode lookup of the target will fail before we get here.
*
* Re [*] in the table above: NFSv4 would normally Permit delete for
* these two cells of the matrix.
* See acl.h for notes on which ACE_... flags should be checked for which
* operations. Specifically, the NFSv4 committee recommendation is in
* conflict with the Windows interpretation of DENY ACEs, where DENY ACEs
* should take precedence over ALLOW ACEs.
*
* This implementation always consults the target object's ACL first.
* If a DENY ACE is present on the target object that specifies ACE_DELETE,
* delete access is denied. If an ALLOW ACE with ACE_DELETE is present on
* the target object, access is allowed. If and only if no entries with
* ACE_DELETE are present in the object's ACL, check the container's ACL
* for entries with ACE_DELETE_CHILD.
*
* A summary of the logic implemented from the table above is as follows:
*
* First check for DENY ACEs that apply.
* If either target or container has a deny, EACCES.
*
* Delete access can then be summarized as follows:
* 1: The object to be deleted grants ACE_DELETE, or
* 2: The containing directory grants ACE_DELETE_CHILD.
* In a Windows system, that would be the end of the story.
* In this system, (2) has some complications...
* 2a: "sticky" bit on a directory adds restrictions, and
* 2b: existing ACEs from previous versions of ZFS may
* not carry ACE_DELETE_CHILD where they should, so we
* also allow delete when ACE_WRITE_DATA is granted.
*
* Note: 2b is technically a work-around for a prior bug,
* which hopefully can go away some day. For those who
* no longer need the work-around, and for testing, this
* work-around is made conditional via the tunable:
* zfs_write_implies_delete_child
*/
int
zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr)
{
uint32_t wanted_dirperms;
uint32_t dzp_working_mode = 0;
uint32_t zp_working_mode = 0;
int dzp_error, zp_error;
boolean_t dzpcheck_privs;
boolean_t zpcheck_privs;
if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
return (SET_ERROR(EPERM));
/*
* Case 1:
* If target object grants ACE_DELETE then we are done. This is
* indicated by a return value of 0. For this case we don't worry
* about the sticky bit because sticky only applies to the parent
* directory and this is the child access result.
*
* If we encounter a DENY ACE here, we're also done (EACCES).
* Note that if we hit a DENY ACE here (on the target) it should
* take precedence over a DENY ACE on the container, so that when
* we have more complete auditing support we will be able to
* report an access failure against the specific target.
* (This is part of why we're checking the target first.)
*/
zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode,
&zpcheck_privs, B_FALSE, cr);
if (zp_error == EACCES) {
/* We hit a DENY ACE. */
if (!zpcheck_privs)
return (SET_ERROR(zp_error));
return (secpolicy_vnode_remove(cr));
}
if (zp_error == 0)
return (0);
/*
* Case 2:
* If the containing directory grants ACE_DELETE_CHILD,
* or we're in backward compatibility mode and the
* containing directory has ACE_WRITE_DATA, allow.
* Case 2b is handled with wanted_dirperms.
*/
wanted_dirperms = ACE_DELETE_CHILD;
if (zfs_write_implies_delete_child)
wanted_dirperms |= ACE_WRITE_DATA;
dzp_error = zfs_zaccess_common(dzp, wanted_dirperms,
&dzp_working_mode, &dzpcheck_privs, B_FALSE, cr);
if (dzp_error == EACCES) {
/* We hit a DENY ACE. */
if (!dzpcheck_privs)
return (SET_ERROR(dzp_error));
return (secpolicy_vnode_remove(cr));
}
/*
* Cases 2a, 2b (continued)
*
* Note: dzp_working_mode now contains any permissions
* that were NOT granted. Therefore, if any of the
* wanted_dirperms WERE granted, we will have:
* dzp_working_mode != wanted_dirperms
* We're really asking if ANY of those permissions
* were granted, and if so, grant delete access.
*/
if (dzp_working_mode != wanted_dirperms)
dzp_error = 0;
/*
* dzp_error is 0 if the container granted us permissions to "modify".
* If we do not have permission via one or more ACEs, our current
* privileges may still permit us to modify the container.
*
* dzpcheck_privs is false when, e.g., the FS is read-only.
* Otherwise, do privilege checks for the container.
*/
if (dzp_error != 0 && dzpcheck_privs) {
uid_t owner;
/*
* The secpolicy call needs the requested access and
* the current access mode of the container, but it
* only knows about Unix-style modes (VEXEC, VWRITE),
* so this must condense the fine-grained ACE bits into
* Unix modes.
*
* The VEXEC flag is easy, because we know that has
* always been checked before we get here (during the
* lookup of the target vnode). The container has not
* granted us permissions to "modify", so we do not set
* the VWRITE flag in the current access mode.
*/
owner = zfs_fuid_map_id(ZTOZSB(dzp),
KUID_TO_SUID(ZTOI(dzp)->i_uid), cr, ZFS_OWNER);
dzp_error = secpolicy_vnode_access2(cr, ZTOI(dzp),
owner, S_IXUSR, S_IWUSR|S_IXUSR);
}
if (dzp_error != 0) {
/*
* Note: We may have dzp_error = -1 here (from
* zfs_zaccess_common()). Don't return that.
*/
return (SET_ERROR(EACCES));
}
/*
* At this point, we know that the directory permissions allow
* us to modify, but we still need to check for the additional
* restrictions that apply when the "sticky bit" is set.
*
* Yes, zfs_sticky_remove_access() also checks this bit, but
* checking it here and skipping the call below is nice when
* you're watching all of this with dtrace.
*/
if ((dzp->z_mode & S_ISVTX) == 0)
return (0);
/*
* zfs_sticky_remove_access will succeed if:
* 1. The sticky bit is absent.
* 2. We pass the sticky bit restrictions.
* 3. We have privileges that always allow file removal.
*/
return (zfs_sticky_remove_access(dzp, zp, cr));
}
int
zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
znode_t *tzp, cred_t *cr)
{
int add_perm;
int error;
if (szp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
add_perm = S_ISDIR(ZTOI(szp)->i_mode) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;
/*
* Rename permissions are a combination of delete permission +
* add file/subdir permission.
*/
/*
* First make sure we can do the delete portion.
*
* If that succeeds, then check for add_file/add_subdir permissions.
*/
if ((error = zfs_zaccess_delete(sdzp, szp, cr)))
return (error);
/*
* If we have a tzp, see if we can delete it.
*/
if (tzp) {
if ((error = zfs_zaccess_delete(tdzp, tzp, cr)))
return (error);
}
/*
* Now check for add permissions
*/
error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr);
return (error);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
index 577927747aef..8cc454468a3f 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
@@ -1,2251 +1,2251 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
#ifdef _KERNEL
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/mntent.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/atomic.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zpl.h>
#endif /* _KERNEL */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_refcount.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* Functions needed for userland (i.e., libzpool) are not put under
* #ifdef _KERNEL; the rest of the functions have dependencies
* (such as VFS logic) that will not compile easily in userland.
*/
#ifdef _KERNEL
static kmem_cache_t *znode_cache = NULL;
static kmem_cache_t *znode_hold_cache = NULL;
unsigned int zfs_object_mutex_size = ZFS_OBJ_MTX_SZ;
/*
* This is used by the test suite so that it can delay znodes from being
* freed in order to inspect the unlinked set.
*/
int zfs_unlink_suspend_progress = 0;
/*
* This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
* z_rangelock. It will modify the offset and length of the lock to reflect
* znode-specific information, and convert RL_APPEND to RL_WRITER. This is
* called with the rangelock_t's rl_lock held, which avoids races.
*/
static void
zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
znode_t *zp = arg;
/*
* If in append mode, convert to writer and lock starting at the
* current end of file.
*/
if (new->lr_type == RL_APPEND) {
new->lr_offset = zp->z_size;
new->lr_type = RL_WRITER;
}
/*
* If we need to grow the block size then lock the whole file range.
*/
uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
new->lr_offset = 0;
new->lr_length = UINT64_MAX;
}
}
/*ARGSUSED*/
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
znode_t *zp = buf;
inode_init_once(ZTOI(zp));
list_link_init(&zp->z_link_node);
mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zp->z_name_lock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);
zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
zp->z_dirlocks = NULL;
zp->z_acl_cached = NULL;
zp->z_xattr_cached = NULL;
zp->z_xattr_parent = 0;
return (0);
}
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
znode_t *zp = buf;
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
rw_destroy(&zp->z_parent_lock);
rw_destroy(&zp->z_name_lock);
mutex_destroy(&zp->z_acl_lock);
rw_destroy(&zp->z_xattr_lock);
zfs_rangelock_fini(&zp->z_rangelock);
ASSERT3P(zp->z_dirlocks, ==, NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
}
static int
zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
{
znode_hold_t *zh = buf;
mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
zfs_refcount_create(&zh->zh_refcount);
zh->zh_obj = ZFS_NO_OBJECT;
return (0);
}
static void
zfs_znode_hold_cache_destructor(void *buf, void *arg)
{
znode_hold_t *zh = buf;
mutex_destroy(&zh->zh_lock);
zfs_refcount_destroy(&zh->zh_refcount);
}
void
zfs_znode_init(void)
{
/*
* Initialize the znode caches. The KMC_SLAB hint is used so that the
* cache is backed by kmalloc() on the Linux slab, which is needed for
* wait_on_bit() operations on the embedded inode to work properly.
*/
ASSERT(znode_cache == NULL);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_SLAB);
ASSERT(znode_hold_cache == NULL);
znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
}
void
zfs_znode_fini(void)
{
/*
* Cleanup zcache
*/
if (znode_cache)
kmem_cache_destroy(znode_cache);
znode_cache = NULL;
if (znode_hold_cache)
kmem_cache_destroy(znode_hold_cache);
znode_hold_cache = NULL;
}
/*
* The zfs_znode_hold_enter() / zfs_znode_hold_exit() functions are used to
* serialize access to a znode and its SA buffer while the object is being
* created or destroyed. This kind of locking would normally reside in the
* znode itself but in this case that's impossible because the znode and SA
* buffer may not yet exist. Therefore the locking is handled externally
* with an array of mutexes and AVL trees which contain per-object locks.
*
* In zfs_znode_hold_enter() a per-object lock is created as needed, inserted
* into the correct AVL tree, and finally the per-object lock is held. In
* zfs_znode_hold_exit() the process is reversed. The per-object lock is
* released, removed from the AVL tree and destroyed if there are no waiters.
*
* This scheme has two important properties:
*
* 1) No memory allocations are performed while holding one of the z_hold_locks.
* This ensures evict(), which can be called from direct memory reclaim, will
* never block waiting on a z_hold_locks which just happens to have hashed
* to the same index.
*
* 2) All locks used to serialize access to an object are per-object and never
* shared. This minimizes lock contention without creating a large number
* of dedicated locks.
*
* On the downside it does require znode_hold_t structures to be frequently
* allocated and freed. However, because these are backed by a kmem cache
* and very short lived, this cost is minimal.
*/
int
zfs_znode_hold_compare(const void *a, const void *b)
{
const znode_hold_t *zh_a = (const znode_hold_t *)a;
const znode_hold_t *zh_b = (const znode_hold_t *)b;
return (TREE_CMP(zh_a->zh_obj, zh_b->zh_obj));
}
static boolean_t __maybe_unused
zfs_znode_held(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, search;
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t held;
search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
held = (zh && MUTEX_HELD(&zh->zh_lock)) ? B_TRUE : B_FALSE;
mutex_exit(&zfsvfs->z_hold_locks[i]);
return (held);
}
static znode_hold_t *
zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, *zh_new, search;
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t found = B_FALSE;
zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP);
zh_new->zh_obj = obj;
search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
if (likely(zh == NULL)) {
zh = zh_new;
avl_add(&zfsvfs->z_hold_trees[i], zh);
} else {
ASSERT3U(zh->zh_obj, ==, obj);
found = B_TRUE;
}
zfs_refcount_add(&zh->zh_refcount, NULL);
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (found == B_TRUE)
kmem_cache_free(znode_hold_cache, zh_new);
ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
mutex_enter(&zh->zh_lock);
return (zh);
}
static void
zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
{
int i = ZFS_OBJ_HASH(zfsvfs, zh->zh_obj);
boolean_t remove = B_FALSE;
ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
mutex_exit(&zh->zh_lock);
mutex_enter(&zfsvfs->z_hold_locks[i]);
if (zfs_refcount_remove(&zh->zh_refcount, NULL) == 0) {
avl_remove(&zfsvfs->z_hold_trees[i], zh);
remove = B_TRUE;
}
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (remove == B_TRUE)
kmem_cache_free(znode_hold_cache, zh);
}
dev_t
zfs_cmpldev(uint64_t dev)
{
return (dev);
}
static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
ASSERT(zfs_znode_held(zfsvfs, zp->z_id));
mutex_enter(&zp->z_lock);
ASSERT(zp->z_sa_hdl == NULL);
ASSERT(zp->z_acl_cached == NULL);
if (sa_hdl == NULL) {
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
sa_set_userp(sa_hdl, zp);
}
zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
mutex_exit(&zp->z_lock);
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
ASSERT(zfs_znode_held(ZTOZSB(zp), zp->z_id) || zp->z_unlinked ||
RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
}
/*
* Called by new_inode() to allocate a new inode.
*/
int
zfs_inode_alloc(struct super_block *sb, struct inode **ip)
{
znode_t *zp;
zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
*ip = ZTOI(zp);
return (0);
}
/*
* Called in multiple places when an inode should be destroyed.
*/
void
zfs_inode_destroy(struct inode *ip)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
mutex_enter(&zfsvfs->z_znodes_lock);
if (list_link_active(&zp->z_link_node)) {
list_remove(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes--;
}
mutex_exit(&zfsvfs->z_znodes_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
kmem_cache_free(znode_cache, zp);
}
static void
zfs_inode_set_ops(zfsvfs_t *zfsvfs, struct inode *ip)
{
uint64_t rdev = 0;
switch (ip->i_mode & S_IFMT) {
case S_IFREG:
ip->i_op = &zpl_inode_operations;
ip->i_fop = &zpl_file_operations;
ip->i_mapping->a_ops = &zpl_address_space_operations;
break;
case S_IFDIR:
ip->i_op = &zpl_dir_inode_operations;
ip->i_fop = &zpl_dir_file_operations;
ITOZ(ip)->z_zn_prefetch = B_TRUE;
break;
case S_IFLNK:
ip->i_op = &zpl_symlink_inode_operations;
break;
/*
* rdev is only stored in a SA for device files.
*/
case S_IFCHR:
case S_IFBLK:
(void) sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zfsvfs), &rdev,
sizeof (rdev));
- /*FALLTHROUGH*/
+ /* FALLTHROUGH */
case S_IFIFO:
case S_IFSOCK:
init_special_inode(ip, ip->i_mode, rdev);
ip->i_op = &zpl_special_inode_operations;
break;
default:
zfs_panic_recover("inode %llu has invalid mode: 0x%x\n",
(u_longlong_t)ip->i_ino, ip->i_mode);
/* Assume the inode is a file and attempt to continue */
ip->i_mode = S_IFREG | 0644;
ip->i_op = &zpl_inode_operations;
ip->i_fop = &zpl_file_operations;
ip->i_mapping->a_ops = &zpl_address_space_operations;
break;
}
}
static void
zfs_set_inode_flags(znode_t *zp, struct inode *ip)
{
/*
* Linux and Solaris have different sets of file attributes, so we
* restrict this conversion to the intersection of the two.
*/
#ifdef HAVE_INODE_SET_FLAGS
unsigned int flags = 0;
if (zp->z_pflags & ZFS_IMMUTABLE)
flags |= S_IMMUTABLE;
if (zp->z_pflags & ZFS_APPENDONLY)
flags |= S_APPEND;
inode_set_flags(ip, flags, S_IMMUTABLE|S_APPEND);
#else
if (zp->z_pflags & ZFS_IMMUTABLE)
ip->i_flags |= S_IMMUTABLE;
else
ip->i_flags &= ~S_IMMUTABLE;
if (zp->z_pflags & ZFS_APPENDONLY)
ip->i_flags |= S_APPEND;
else
ip->i_flags &= ~S_APPEND;
#endif
}
/*
* Update the embedded inode given the znode.
*/
void
zfs_znode_update_vfs(znode_t *zp)
{
zfsvfs_t *zfsvfs;
struct inode *ip;
uint32_t blksize;
u_longlong_t i_blocks;
ASSERT(zp != NULL);
zfsvfs = ZTOZSB(zp);
ip = ZTOI(zp);
/* Skip .zfs control nodes which do not exist on disk. */
if (zfsctl_is_node(ip))
return;
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);
spin_lock(&ip->i_lock);
ip->i_mode = zp->z_mode;
ip->i_blocks = i_blocks;
i_size_write(ip, zp->z_size);
spin_unlock(&ip->i_lock);
}
/*
* Construct a znode+inode and initialize.
*
* This does not do a call to dmu_set_user() that is
* up to the caller to do, in case you don't want to
* return the znode
*/
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
dmu_object_type_t obj_type, sa_handle_t *hdl)
{
znode_t *zp;
struct inode *ip;
uint64_t mode;
uint64_t parent;
uint64_t tmp_gen;
uint64_t links;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2];
uint64_t projid = ZFS_DEFAULT_PROJID;
sa_bulk_attr_t bulk[11];
int count = 0;
ASSERT(zfsvfs != NULL);
ip = new_inode(zfsvfs->z_sb);
if (ip == NULL)
return (NULL);
zp = ITOZ(ip);
ASSERT(zp->z_dirlocks == NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
zp->z_is_mapped = B_FALSE;
zp->z_is_ctldir = B_FALSE;
zp->z_is_stale = B_FALSE;
zp->z_suspended = B_FALSE;
zp->z_sa_hdl = NULL;
zp->z_mapcnt = 0;
zp->z_id = db->db_object;
zp->z_blksz = blksz;
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
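/*
 * Bind the SA handle and bulk-load the common attributes needed to
 * initialize the in-core inode.
 */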
zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &z_uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &z_gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0 ||
(dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
(zp->z_pflags & ZFS_PROJID) &&
sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
goto error;
}
zp->z_projid = projid;
zp->z_mode = ip->i_mode = mode;
ip->i_generation = (uint32_t)tmp_gen;
ip->i_blkbits = SPA_MINBLOCKSHIFT;
set_nlink(ip, (uint32_t)links);
zfs_uid_write(ip, z_uid);
zfs_gid_write(ip, z_gid);
zfs_set_inode_flags(zp, ip);
/* Cache the xattr parent id */
if (zp->z_pflags & ZFS_XATTR)
zp->z_xattr_parent = parent;
ZFS_TIME_DECODE(&ip->i_atime, atime);
ZFS_TIME_DECODE(&ip->i_mtime, mtime);
ZFS_TIME_DECODE(&ip->i_ctime, ctime);
ip->i_ino = zp->z_id;
zfs_znode_update_vfs(zp);
zfs_inode_set_ops(zfsvfs, ip);
/*
* The only way insert_inode_locked() can fail is if the ip->i_ino
* number is already hashed for this super block. This can never
* happen because the inode numbers map 1:1 with the object numbers.
*
* Exceptions include rolling back a mounted file system, either
* from the zfs rollback or zfs recv command.
*
* Active inodes are unhashed during the rollback, but since zrele
* can happen asynchronously, we can't guarantee they've been
* unhashed. This can cause hash collisions in unlinked drain
* processing so do not hash unlinked znodes.
*/
if (links > 0)
VERIFY3S(insert_inode_locked(ip), ==, 0);
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
mutex_exit(&zfsvfs->z_znodes_lock);
if (links > 0)
unlock_new_inode(ip);
return (zp);
error:
iput(ip);
return (NULL);
}
/*
* Safely mark an inode dirty. Inodes which are part of a read-only
* file system or snapshot may not be dirtied.
*/
void
zfs_mark_inode_dirty(struct inode *ip)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
return;
mark_inode_dirty(ip);
}
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
* Create a new DMU object to hold a zfs znode.
*
* IN: dzp - parent directory for new znode
* vap - file attributes for new znode
* tx - dmu transaction id for zap operations
* cr - credentials of caller
* flag - flags:
* IS_ROOT_NODE - new object will be root
* IS_TMPFILE - new object is of O_TMPFILE
* IS_XATTR - new object is an attribute
* acl_ids - ACL related attributes
*
* OUT: zpp - allocated znode (set to dzp if IS_ROOT_NODE)
*
*/
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
uint64_t crtime[2], atime[2], mtime[2], ctime[2];
uint64_t mode, size, links, parent, pflags;
uint64_t projid = ZFS_DEFAULT_PROJID;
uint64_t rdev = 0;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
dmu_buf_t *db;
inode_timespec_t now;
uint64_t gen, obj;
int bonuslen;
int dnodesize;
sa_handle_t *sa_hdl;
dmu_object_type_t obj_type;
sa_bulk_attr_t *sa_attrs;
int cnt = 0;
zfs_acl_locator_cb_t locate = { 0 };
znode_hold_t *zh;
if (zfsvfs->z_replay) {
obj = vap->va_nodeid;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
dnodesize = vap->va_fsid; /* ditto */
} else {
obj = 0;
gethrestime(&now);
gen = dmu_tx_get_txg(tx);
dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
}
if (dnodesize == 0)
dnodesize = DNODE_MIN_SIZE;
obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
bonuslen = (obj_type == DMU_OT_SA) ?
DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;
/*
* Create a new DMU object.
*/
/*
* There's currently no mechanism for pre-reading the blocks that will
* be needed to allocate a new object, so we accept the small chance
* that there will be an i/o error and we will fail one of the
* assertions below.
*/
if (S_ISDIR(vap->va_mode)) {
if (zfsvfs->z_replay) {
VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = zap_create_norm_dnsize(zfsvfs->z_os,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx);
}
} else {
if (zfsvfs->z_replay) {
VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx);
}
}
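/*
 * Serialize against concurrent lookups of this object while its SA
 * buffer and znode are being initialized.
 */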
zh = zfs_znode_hold_enter(zfsvfs, obj);
VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
* to reference the just-allocated physical data area.
*/
if (flag & IS_ROOT_NODE) {
dzp->z_id = obj;
}
/*
* If parent is an xattr, so am I.
*/
if (dzp->z_pflags & ZFS_XATTR) {
flag |= IS_XATTR;
}
if (zfsvfs->z_use_fuids)
pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
else
pflags = 0;
if (S_ISDIR(vap->va_mode)) {
size = 2; /* contents ("." and "..") */
links = 2;
} else {
size = 0;
links = (flag & IS_TMPFILE) ? 0 : 1;
}
if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
rdev = vap->va_rdev;
parent = dzp->z_id;
mode = acl_ids->z_mode;
if (flag & IS_XATTR)
pflags |= ZFS_XATTR;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode)) {
/*
* With the ZFS_PROJID flag, we can easily tell whether a
* project ID is stored on disk or not. See zfs_space_delta_cb().
*/
if (obj_type != DMU_OT_ZNODE &&
dmu_objset_projectquota_enabled(zfsvfs->z_os))
pflags |= ZFS_PROJID;
/*
* Inherit project ID from parent if required.
*/
projid = zfs_inherit_projid(dzp);
if (dzp->z_pflags & ZFS_PROJINHERIT)
pflags |= ZFS_PROJINHERIT;
}
/*
* Whether ZFS_NO_EXECS_DENIED should be set is determined when
* zfs_mode_compute() is called.
*/
pflags |= acl_ids->z_aclp->z_hints &
(ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);
ZFS_TIME_ENCODE(&now, crtime);
ZFS_TIME_ENCODE(&now, ctime);
if (vap->va_mask & ATTR_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, atime);
} else {
ZFS_TIME_ENCODE(&now, atime);
}
if (vap->va_mask & ATTR_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
} else {
ZFS_TIME_ENCODE(&now, mtime);
}
/* Now add in all of the "SA" attributes */
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
* Set up the array of attributes to be replaced/set on the new file.
*
* The order for DMU_OT_ZNODE is critical since it needs to be constructed
* in the old znode_phys_t format. Don't change this ordering.
*/
sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
} else {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
NULL, &acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
NULL, &acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
}
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
&empty_xattr, 8);
} else if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
pflags & ZFS_PROJID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PROJID(zfsvfs),
NULL, &projid, 8);
}
if (obj_type == DMU_OT_ZNODE ||
(S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
NULL, &rdev, 8);
}
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
&acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
&acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
sizeof (uint64_t) * 4);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (zfs_acl_phys_t));
} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
&acl_ids->z_aclp->z_acl_count, 8);
locate.cb_aclp = acl_ids->z_aclp;
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate,
acl_ids->z_aclp->z_acl_bytes);
mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
acl_ids->z_fuid, acl_ids->z_fgid);
}
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
if (!(flag & IS_ROOT_NODE)) {
/*
* The call to zfs_znode_alloc() may fail if memory is low
* via the call path: alloc_inode() -> inode_init_always() ->
* security_inode_alloc() -> inode_alloc_security(). Since
* the existing code is written such that zfs_mknode() cannot
* fail, retry until sufficient memory has been reclaimed.
*/
do {
*zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
} while (*zpp == NULL);
VERIFY(*zpp != NULL);
VERIFY(dzp != NULL);
} else {
/*
* If we are creating the root node, the "parent" we
* passed in is the znode for the root.
*/
*zpp = dzp;
(*zpp)->z_sa_hdl = sa_hdl;
}
(*zpp)->z_pflags = pflags;
(*zpp)->z_mode = ZTOI(*zpp)->i_mode = mode;
(*zpp)->z_dnodesize = dnodesize;
(*zpp)->z_projid = projid;
if (obj_type == DMU_OT_ZNODE ||
acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
}
kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
zfs_znode_hold_exit(zfsvfs, zh);
}
/*
* Update in-core attributes. It is assumed the caller will be doing an
* sa_bulk_update to push the changes out.
*/
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
xoptattr_t *xoap;
boolean_t update_inode = B_FALSE;
xoap = xva_getxoptattr(xvap);
ASSERT(xoap);
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
uint64_t times[2];
ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
&times, sizeof (times), tx);
XVA_SET_RTN(xvap, XAT_CREATETIME);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
update_inode = B_TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
update_inode = B_TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
xoap->xoa_av_quarantined, zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
zfs_sa_set_scanstamp(zp, xvap, tx);
XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
ZFS_ATTR_SET(zp, ZFS_PROJINHERIT, xoap->xoa_projinherit,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_PROJINHERIT);
}
if (update_inode)
zfs_set_inode_flags(zp, ZTOI(zp));
}
int
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
znode_t *zp;
znode_hold_t *zh;
int err;
sa_handle_t *hdl;
*zpp = NULL;
again:
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
hdl = dmu_buf_get_user(db);
if (hdl != NULL) {
zp = sa_get_userdata(hdl);
/*
* Since "SA" does immediate eviction we
* should never find a sa handle that doesn't
* know about the znode.
*/
ASSERT3P(zp, !=, NULL);
mutex_enter(&zp->z_lock);
ASSERT3U(zp->z_id, ==, obj_num);
/*
* If zp->z_unlinked is set, the znode is already marked
* for deletion and should not be discovered. Check this
* after checking igrab() due to fsetxattr() & O_TMPFILE.
*
* If igrab() returns NULL the VFS has independently
* determined the inode should be evicted and has
* called iput_final() to start the eviction process.
* The SA handle is still valid but because the VFS
* requires that the eviction succeed we must drop
* our locks and references to allow the eviction to
* complete. The zfs_zget() may then be retried.
*
* This unlikely case could be optimized by registering
* a sops->drop_inode() callback. The callback would
* need to detect the active SA hold thereby informing
* the VFS that this inode should not be evicted.
*/
if (igrab(ZTOI(zp)) == NULL) {
if (zp->z_unlinked)
err = SET_ERROR(ENOENT);
else
err = SET_ERROR(EAGAIN);
} else {
*zpp = zp;
err = 0;
}
mutex_exit(&zp->z_lock);
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
if (err == EAGAIN) {
/* inode might need this to finish evict */
cond_resched();
goto again;
}
return (err);
}
/*
* Not found; create a new znode/vnode, but only if the file exists.
*
* There is a small window where zfs_vget() could
* find this object while a file create is still in
* progress. This is checked for in zfs_znode_alloc().
*
* If zfs_znode_alloc() fails it will drop the hold on the
* bonus buffer.
*/
zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
doi.doi_bonus_type, NULL);
if (zp == NULL) {
err = SET_ERROR(ENOENT);
} else {
*zpp = zp;
}
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
int
zfs_rezget(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_info_t doi;
dmu_buf_t *db;
uint64_t obj_num = zp->z_id;
uint64_t mode;
uint64_t links;
sa_bulk_attr_t bulk[10];
int err;
int count = 0;
uint64_t gen;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2];
uint64_t projid = ZFS_DEFAULT_PROJID;
znode_hold_t *zh;
/*
* Skip the ctldir znodes, otherwise they will always get invalidated.
* This causes odd behaviour for mounted snapdirs. In particular, for
* Linux >= 3.18, d_invalidate will detach the mountpoint and prevent
* anyone from automounting it again as long as someone is still using
* the detached mount.
*/
if (zp->z_is_ctldir)
return (0);
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
mutex_enter(&zp->z_acl_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
mutex_exit(&zp->z_acl_lock);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
rw_exit(&zp->z_xattr_lock);
ASSERT(zp->z_sa_hdl == NULL);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
/* reload cached values */
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
&gen, sizeof (gen));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, sizeof (zp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&z_uid, sizeof (z_uid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&z_gid, sizeof (z_gid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
if (dmu_objset_projectquota_enabled(zfsvfs->z_os)) {
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs),
&projid, 8);
if (err != 0 && err != ENOENT) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(err));
}
}
zp->z_projid = projid;
zp->z_mode = ZTOI(zp)->i_mode = mode;
zfs_uid_write(ZTOI(zp), z_uid);
zfs_gid_write(ZTOI(zp), z_gid);
ZFS_TIME_DECODE(&ZTOI(zp)->i_atime, atime);
ZFS_TIME_DECODE(&ZTOI(zp)->i_mtime, mtime);
ZFS_TIME_DECODE(&ZTOI(zp)->i_ctime, ctime);
if ((uint32_t)gen != ZTOI(zp)->i_generation) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
set_nlink(ZTOI(zp), (uint32_t)links);
zfs_set_inode_flags(zp, ZTOI(zp));
zp->z_blksz = doi.doi_data_block_size;
zp->z_atime_dirty = B_FALSE;
zfs_znode_update_vfs(zp);
/*
* If the file has zero links, then it has been unlinked on the send
* side and it must be in the received unlinked set.
* We call zfs_znode_dmu_fini() now to prevent any accesses to the
* stale data and to prevent automatic removal of the file in
* zfs_zinactive(). The file will be removed either when it is removed
* on the send side and the next incremental stream is received or
* when the unlinked set gets processed.
*/
zp->z_unlinked = (ZTOI(zp)->i_nlink == 0);
if (zp->z_unlinked)
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (0);
}
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
objset_t *os = zfsvfs->z_os;
uint64_t obj = zp->z_id;
uint64_t acl_obj = zfs_external_acl(zp);
znode_hold_t *zh;
zh = zfs_znode_hold_enter(zfsvfs, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
}
VERIFY(0 == dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
void
zfs_zinactive(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
znode_hold_t *zh;
ASSERT(zp->z_sa_hdl);
/*
* Don't allow a zfs_zget() while we're trying to release this znode.
*/
zh = zfs_znode_hold_enter(zfsvfs, z_id);
mutex_enter(&zp->z_lock);
/*
* If this was the last reference to a file with no links, remove
* the file from the file system unless the file system is mounted
* read-only. That can happen, for example, if the file system was
* originally read-write, the file was opened, then unlinked and
* the file system was made read-only before the file was finally
* closed. The file will remain in the unlinked set.
*/
if (zp->z_unlinked) {
ASSERT(!zfsvfs->z_issnap);
if (!zfs_is_readonly(zfsvfs) && !zfs_unlink_suspend_progress) {
mutex_exit(&zp->z_lock);
zfs_znode_hold_exit(zfsvfs, zh);
zfs_rmnode(zp);
return;
}
}
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
#if defined(HAVE_INODE_TIMESPEC64_TIMES)
#define zfs_compare_timespec timespec64_compare
#else
#define zfs_compare_timespec timespec_compare
#endif
/*
* Determine whether the znode's atime must be updated. The logic mostly
* duplicates the Linux kernel's relatime_need_update() functionality.
* This function is only called if the underlying filesystem actually has
* atime updates enabled.
*/
boolean_t
zfs_relatime_need_update(const struct inode *ip)
{
inode_timespec_t now;
gethrestime(&now);
/*
* In relatime mode, only update the atime if the previous atime
* is earlier than either the ctime or mtime or if at least a day
* has passed since the last update of atime.
*/
if (zfs_compare_timespec(&ip->i_mtime, &ip->i_atime) >= 0)
return (B_TRUE);
if (zfs_compare_timespec(&ip->i_ctime, &ip->i_atime) >= 0)
return (B_TRUE);
if ((hrtime_t)now.tv_sec - (hrtime_t)ip->i_atime.tv_sec >= 24*60*60)
return (B_TRUE);
return (B_FALSE);
}
/*
* Prepare to update znode time stamps.
*
* IN: zp - znode requiring timestamp update
* flag - ATTR_MTIME, ATTR_CTIME flags
*
* OUT: zp - z_seq
* mtime - new mtime
* ctime - new ctime
*
* Note: We don't update atime here, because we rely on Linux VFS to do
* atime updating.
*/
void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
uint64_t ctime[2])
{
inode_timespec_t now;
gethrestime(&now);
zp->z_seq++;
if (flag & ATTR_MTIME) {
ZFS_TIME_ENCODE(&now, mtime);
ZFS_TIME_DECODE(&(ZTOI(zp)->i_mtime), mtime);
if (ZTOZSB(zp)->z_use_fuids) {
zp->z_pflags |= (ZFS_ARCHIVE |
ZFS_AV_MODIFIED);
}
}
if (flag & ATTR_CTIME) {
ZFS_TIME_ENCODE(&now, ctime);
ZFS_TIME_DECODE(&(ZTOI(zp)->i_ctime), ctime);
if (ZTOZSB(zp)->z_use_fuids)
zp->z_pflags |= ZFS_ARCHIVE;
}
}
/*
* Grow the block size for a file.
*
* IN: zp - znode of file to free data in.
* size - requested block size
* tx - open transaction.
*
* NOTE: this function assumes that the znode is write locked.
*/
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
int error;
u_longlong_t dummy;
if (size <= zp->z_blksz)
return;
/*
* If the file size is already greater than the current blocksize,
* we will not grow. If there is more than one block in a file,
* the blocksize cannot change.
*/
if (zp->z_blksz && zp->z_size > zp->z_blksz)
return;
error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
size, 0, tx);
if (error == ENOTSUP)
return;
ASSERT0(error);
/* What blocksize did we actually get? */
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
/*
* Increase the file length
*
* IN: zp - znode of file to free data in.
* end - new end-of-file
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_extend(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
uint64_t newblksz;
int error;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end <= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
(!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
ASSERT(!ISP2(zp->z_blksz));
newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
} else {
newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
}
dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
} else {
newblksz = 0;
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
if (newblksz)
zfs_grow_blocksize(zp, newblksz, tx);
zp->z_size = end;
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
zfs_rangelock_exit(lr);
dmu_tx_commit(tx);
return (0);
}
/*
* zfs_zero_partial_page - Modeled after update_pages() but
* with different arguments and semantics for use by zfs_freesp().
*
* Zeroes a piece of a single page cache entry for zp at offset
* start and length len.
*
* Caller must acquire a range lock on the file for the region
* being zeroed in order that the ARC and page cache stay in sync.
*/
static void
zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
{
struct address_space *mp = ZTOI(zp)->i_mapping;
struct page *pp;
int64_t off;
void *pb;
ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));
off = start & (PAGE_SIZE - 1);
start &= PAGE_MASK;
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
pb = kmap(pp);
bzero(pb + off, len);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
SetPageUptodate(pp);
ClearPageError(pp);
unlock_page(pp);
put_page(pp);
}
}
/*
* Free space in a file.
*
* IN: zp - znode of file to free data in.
* off - start of section to free.
* len - length of section to free.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_locked_range_t *lr;
int error;
/*
* Lock the range being freed.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (off >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
if (off + len > zp->z_size)
len = zp->z_size - off;
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
/*
* Zero partial page cache entries. This must be done under a
* range lock in order to keep the ARC and page cache in sync.
*/
if (zp->z_is_mapped) {
loff_t first_page, last_page, page_len;
loff_t first_page_offset, last_page_offset;
/* first possible full page in hole */
first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* last page of hole */
last_page = (off + len) >> PAGE_SHIFT;
/* offset of first_page */
first_page_offset = first_page << PAGE_SHIFT;
/* offset of last_page */
last_page_offset = last_page << PAGE_SHIFT;
/* truncate whole pages */
if (last_page_offset > first_page_offset) {
truncate_inode_pages_range(ZTOI(zp)->i_mapping,
first_page_offset, last_page_offset - 1);
}
/* truncate sub-page ranges */
if (first_page > last_page) {
/* entire punched area within a single page */
zfs_zero_partial_page(zp, off, len);
} else {
/* beginning of punched area at the end of a page */
page_len = first_page_offset - off;
if (page_len > 0)
zfs_zero_partial_page(zp, off, page_len);
/* end of punched area at the beginning of a page */
page_len = off + len - last_page_offset;
if (page_len > 0)
zfs_zero_partial_page(zp, last_page_offset,
page_len);
}
}
zfs_rangelock_exit(lr);
return (error);
}
/*
* Truncate a file
*
* IN: zp - znode of file to free data in.
* end - new end-of-file.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
int error;
sa_bulk_attr_t bulk[2];
int count = 0;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
DMU_OBJECT_END);
if (error) {
zfs_rangelock_exit(lr);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
zp->z_size = end;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
NULL, &zp->z_size, sizeof (zp->z_size));
if (end == 0) {
zp->z_pflags &= ~ZFS_SPARSE;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
}
VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
return (0);
}
/*
* Free space in a file
*
* IN: zp - znode of file to free data in.
* off - start of range
* len - length of the range to free (0 => free from off to EOF)
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
* RETURN: 0 on success, error code on failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
uint64_t mode;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
int error;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
sizeof (mode))) != 0)
return (error);
if (off > zp->z_size) {
error = zfs_extend(zp, off+len);
if (error == 0 && log)
goto log;
goto out;
}
if (len == 0) {
error = zfs_trunc(zp, off);
} else {
if ((error = zfs_free_range(zp, off, len)) == 0 &&
off + len > zp->z_size)
error = zfs_extend(zp, off+len);
}
if (error || !log)
goto out;
log:
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out;
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT(error == 0);
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
dmu_tx_commit(tx);
zfs_znode_update_vfs(zp);
error = 0;
out:
/*
* Truncate the page cache - for file truncate operations, use
* the purpose-built API for truncations. For punching operations,
* the truncation is handled under a range lock in zfs_free_range.
*/
if (len == 0)
truncate_setsize(ZTOI(zp), off);
return (error);
}
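/*
 * Editor's sketch (not part of this change) of how the arguments above
 * select a path: len == 0 truncates the file to 'off' via zfs_trunc(),
 * while a non-zero len frees the byte range [off, off + len) via
 * zfs_free_range() and extends the file if the range ends past EOF.
 * 'zp' and the O_RDWR flag below are placeholders for whatever the real
 * caller holds; error handling is omitted.
 */
#if 0
	/* Truncate the file to 4 KiB. */
	error = zfs_freesp(zp, 4096, 0, O_RDWR, B_TRUE);

	/* Punch a 64 KiB hole at offset 1 MiB in a sufficiently large file. */
	error = zfs_freesp(zp, 1 << 20, 64 * 1024, O_RDWR, B_TRUE);
#endif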
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
struct super_block *sb;
zfsvfs_t *zfsvfs;
uint64_t moid, obj, sa_obj, version;
uint64_t sense = ZFS_CASE_SENSITIVE;
uint64_t norm = 0;
nvpair_t *elem;
int size;
int error;
int i;
znode_t *rootzp = NULL;
vattr_t vattr;
znode_t *zp;
zfs_acl_ids_t acl_ids;
/*
* First attempt to create master node.
*/
/*
* In an empty objset, there are no blocks to read and thus
* there can be no i/o errors (which we assert below).
*/
moid = MASTER_NODE_OBJ;
error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
/*
* Set starting attributes.
*/
version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
elem = NULL;
while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
/* For the moment we expect all zpl props to be uint64_ts */
uint64_t val;
char *name;
ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
VERIFY(nvpair_value_uint64(elem, &val) == 0);
name = nvpair_name(elem);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
if (val < version)
version = val;
} else {
error = zap_update(os, moid, name, 8, 1, &val, tx);
}
ASSERT(error == 0);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
norm = val;
else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
sense = val;
}
ASSERT(version != 0);
error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
/*
* Create zap object used for SA attribute registration
*/
if (version >= ZPL_VERSION_SA) {
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT(error == 0);
} else {
sa_obj = 0;
}
/*
* Create a delete queue.
*/
obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
ASSERT(error == 0);
/*
* Create root znode. Create minimal znode/inode/zfsvfs/sb
* to allow zfs_mknode to work.
*/
vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
vattr.va_mode = S_IFDIR|0755;
vattr.va_uid = crgetuid(cr);
vattr.va_gid = crgetgid(cr);
rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
rootzp->z_unlinked = B_FALSE;
rootzp->z_atime_dirty = B_FALSE;
rootzp->z_is_sa = USE_SA(version, os);
rootzp->z_pflags = 0;
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
zfsvfs->z_os = os;
zfsvfs->z_parent = zfsvfs;
zfsvfs->z_version = version;
zfsvfs->z_use_fuids = USE_FUIDS(version, os);
zfsvfs->z_use_sa = USE_SA(version, os);
zfsvfs->z_norm = norm;
sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
sb->s_fs_info = zfsvfs;
ZTOI(rootzp)->i_sb = sb;
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
ASSERT(error == 0);
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (i = 0; i != size; i++) {
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp);
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
ASSERT(error == 0);
zfs_acl_ids_free(&acl_ids);
atomic_set(&ZTOI(rootzp)->i_count, 0);
sa_handle_destroy(rootzp->z_sa_hdl);
kmem_cache_free(znode_cache, rootzp);
for (i = 0; i != size; i++) {
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
mutex_destroy(&zfsvfs->z_znodes_lock);
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
kmem_free(sb, sizeof (struct super_block));
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */
static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
uint64_t sa_obj = 0;
int error;
error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
if (error != 0 && error != ENOENT)
return (error);
error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
return (error);
}
static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
dmu_buf_t **db, void *tag)
{
dmu_object_info_t doi;
int error;
if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
return (error);
dmu_object_info_from_db(*db, &doi);
if ((doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t))) {
sa_buf_rele(*db, tag);
return (SET_ERROR(ENOTSUP));
}
error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
if (error != 0) {
sa_buf_rele(*db, tag);
return (error);
}
return (0);
}
static void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag)
{
sa_handle_destroy(hdl);
sa_buf_rele(db, tag);
}
/*
* Given an object number, return its parent object number and whether
* or not the object is an extended attribute directory.
*/
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
uint64_t *pobjp, int *is_xattrdir)
{
uint64_t parent;
uint64_t pflags;
uint64_t mode;
uint64_t parent_mode;
sa_bulk_attr_t bulk[3];
sa_handle_t *sa_hdl;
dmu_buf_t *sa_db;
int count = 0;
int error;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
&parent, sizeof (parent));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
&pflags, sizeof (pflags));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&mode, sizeof (mode));
if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
return (error);
/*
* When a link is removed its parent pointer is not changed and will
* be invalid. There are two cases where a link is removed but the
* file stays around: when it goes to the delete queue and when there
* are additional links.
*/
error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
if (error != 0)
return (error);
error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
if (error != 0)
return (error);
*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);
/*
* Extended attributes can be applied to files, directories, etc.
* Otherwise the parent must be a directory.
*/
if (!*is_xattrdir && !S_ISDIR(parent_mode))
return (SET_ERROR(EINVAL));
*pobjp = parent;
return (0);
}
/*
* Given an object number, return some zpl level statistics
*/
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
zfs_stat_t *sb)
{
sa_bulk_attr_t bulk[4];
int count = 0;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&sb->zs_mode, sizeof (sb->zs_mode));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
&sb->zs_gen, sizeof (sb->zs_gen));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
&sb->zs_links, sizeof (sb->zs_links));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
&sb->zs_ctime, sizeof (sb->zs_ctime));
return (sa_bulk_lookup(hdl, bulk, count));
}
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
sa_attr_type_t *sa_table, char *buf, int len)
{
sa_handle_t *sa_hdl;
sa_handle_t *prevhdl = NULL;
dmu_buf_t *prevdb = NULL;
dmu_buf_t *sa_db = NULL;
char *path = buf + len - 1;
int error;
*path = '\0';
sa_hdl = hdl;
uint64_t deleteq_obj;
VERIFY0(zap_lookup(osp, MASTER_NODE_OBJ,
ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
error = zap_lookup_int(osp, deleteq_obj, obj);
if (error == 0) {
return (ESTALE);
} else if (error != ENOENT) {
return (error);
}
error = 0;
for (;;) {
uint64_t pobj = 0;
char component[MAXNAMELEN + 2];
size_t complen;
int is_xattrdir = 0;
if (prevdb) {
ASSERT(prevhdl != NULL);
zfs_release_sa_handle(prevhdl, prevdb, FTAG);
}
if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
&is_xattrdir)) != 0)
break;
if (pobj == obj) {
if (path[0] != '/')
*--path = '/';
break;
}
component[0] = '/';
if (is_xattrdir) {
(void) sprintf(component + 1, "<xattrdir>");
} else {
error = zap_value_search(osp, pobj, obj,
ZFS_DIRENT_OBJ(-1ULL), component + 1);
if (error != 0)
break;
}
complen = strlen(component);
path -= complen;
ASSERT(path >= buf);
bcopy(component, path, complen);
obj = pobj;
if (sa_hdl != hdl) {
prevhdl = sa_hdl;
prevdb = sa_db;
}
error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
if (error != 0) {
sa_hdl = prevhdl;
sa_db = prevdb;
break;
}
}
if (sa_hdl != NULL && sa_hdl != hdl) {
ASSERT(sa_db != NULL);
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
}
if (error == 0)
(void) memmove(buf, path, buf + len - path);
return (error);
}
int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
char *buf, int len)
{
char *path = buf + len - 1;
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
*path = '\0';
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
if (error != 0) {
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);
/* CSTYLED */
module_param(zfs_object_mutex_size, uint, 0644);
MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
module_param(zfs_unlink_suspend_progress, int, 0644);
MODULE_PARM_DESC(zfs_unlink_suspend_progress, "Set to prevent async unlinks "
"(debug - leaks space into the unlinked set)");
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
index 0319148b983d..63002fe3b932 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
@@ -1,1077 +1,1083 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
*/
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include <sys/file.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_project.h>
+#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
+#include <linux/pagemap.h>
+#endif
/*
* When using fallocate(2) to preallocate space, inflate the requested
* capacity check by 10% to account for the required metadata blocks.
*/
unsigned int zfs_fallocate_reserve_percent = 110;
static int
zpl_open(struct inode *ip, struct file *filp)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
error = generic_file_open(ip, filp);
if (error)
return (error);
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_release(struct inode *ip, struct file *filp)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
if (ITOZ(ip)->z_atime_dirty)
zfs_mark_inode_dirty(ip);
crhold(cr);
error = -zfs_close(ip, filp->f_flags, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_readdir(file_inode(filp), ctx, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
zpl_dir_context_t ctx =
ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
int error;
error = zpl_iterate(filp, &ctx);
filp->f_pos = ctx.pos;
return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */
#if defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
* Linux 2.6.35 - 3.0 API,
* As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
* redundant. The dentry is still accessible via filp->f_path.dentry,
* and we are guaranteed that filp will never be NULL.
*/
static int
zpl_fsync(struct file *filp, int datasync)
{
struct inode *inode = filp->f_mapping->host;
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_fsync(ITOZ(inode), datasync, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
return (zpl_fsync(kiocb->ki_filp, datasync));
}
#endif
#elif defined(HAVE_FSYNC_RANGE)
/*
* Linux 3.1 API,
* As of 3.1 the responsibility to call filemap_write_and_wait_range() has
* been pushed down in to the .fsync() vfs hook. Additionally, the i_mutex
* lock is no longer held by the caller, for zfs we don't require the lock
* to be held so we don't acquire it.
*/
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
struct inode *inode = filp->f_mapping->host;
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
error = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (error)
return (error);
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_fsync(ITOZ(inode), datasync, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#endif
#else
#error "Unsupported fops->fsync() implementation"
#endif
static inline int
zfs_io_flags(struct kiocb *kiocb)
{
int flags = 0;
#if defined(IOCB_DSYNC)
if (kiocb->ki_flags & IOCB_DSYNC)
flags |= O_DSYNC;
#endif
#if defined(IOCB_SYNC)
if (kiocb->ki_flags & IOCB_SYNC)
flags |= O_SYNC;
#endif
#if defined(IOCB_APPEND)
if (kiocb->ki_flags & IOCB_APPEND)
flags |= O_APPEND;
#endif
#if defined(IOCB_DIRECT)
if (kiocb->ki_flags & IOCB_DIRECT)
flags |= O_DIRECT;
#endif
return (flags);
}
/*
* If relatime is enabled, call file_accessed() if zfs_relatime_need_update()
* is true. This is needed since datasets with inherited "relatime" property
* aren't necessarily mounted with the MNT_RELATIME flag (e.g. after
* `zfs set relatime=...`), which is what the relatime test in the VFS,
* relatime_need_update(), is based on.
*/
static inline void
zpl_file_accessed(struct file *filp)
{
struct inode *ip = filp->f_mapping->host;
if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) {
if (zfs_relatime_need_update(ip))
file_accessed(filp);
} else {
file_accessed(filp);
}
}
#if defined(HAVE_VFS_RW_ITERATE)
/*
* When HAVE_VFS_IOV_ITER is defined the iov_iter structure supports
* iovecs, kvecs, bvecs and pipes, and all the required interfaces to
* manipulate the iov_iter are available. In that case the full iov_iter
* can be attached to the uio and correctly handled in the lower layers.
* Otherwise, for older kernels extract the iovec and pass it instead.
*/
static void
zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
loff_t pos, ssize_t count, size_t skip)
{
#if defined(HAVE_VFS_IOV_ITER)
zfs_uio_iov_iter_init(uio, to, pos, count, skip);
#else
zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
count, skip);
#endif
}
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
ssize_t count = iov_iter_count(to);
zfs_uio_t uio;
zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t read = count - uio.uio_resid;
kiocb->ki_pos += read;
zpl_file_accessed(filp);
return (read);
}
static inline ssize_t
zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from,
size_t *countp)
{
#ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB
ssize_t ret = generic_write_checks(kiocb, from);
if (ret <= 0)
return (ret);
*countp = ret;
#else
struct file *file = kiocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *ip = mapping->host;
int isblk = S_ISBLK(ip->i_mode);
*countp = iov_iter_count(from);
ssize_t ret = generic_write_checks(file, &kiocb->ki_pos, countp, isblk);
if (ret)
return (ret);
#endif
return (0);
}
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
struct inode *ip = filp->f_mapping->host;
zfs_uio_t uio;
size_t count = 0;
ssize_t ret;
ret = zpl_generic_write_checks(kiocb, from, &count);
if (ret)
return (ret);
zpl_uio_init(&uio, kiocb, from, kiocb->ki_pos, count, from->iov_offset);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_write(ITOZ(ip), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t wrote = count - uio.uio_resid;
kiocb->ki_pos += wrote;
return (wrote);
}
#else /* !HAVE_VFS_RW_ITERATE */
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
size_t count;
ssize_t ret;
ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (ret)
return (ret);
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t read = count - uio.uio_resid;
kiocb->ki_pos += read;
zpl_file_accessed(filp);
return (read);
}
static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
struct inode *ip = filp->f_mapping->host;
size_t count;
ssize_t ret;
ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
if (ret)
return (ret);
ret = generic_write_checks(filp, &pos, &count, S_ISBLK(ip->i_mode));
if (ret)
return (ret);
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_write(ITOZ(ip), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t wrote = count - uio.uio_resid;
kiocb->ki_pos += wrote;
return (wrote);
}
#endif /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_direct_IO_impl(int rw, struct kiocb *kiocb, struct iov_iter *iter)
{
if (rw == WRITE)
return (zpl_iter_write(kiocb, iter));
else
return (zpl_iter_read(kiocb, iter));
}
#if defined(HAVE_VFS_DIRECT_IO_ITER)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_OFFSET)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
ASSERT3S(pos, ==, kiocb->ki_pos);
return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
ASSERT3S(pos, ==, kiocb->ki_pos);
return (zpl_direct_IO_impl(rw, kiocb, iter));
}
#else
#error "Unknown direct IO interface"
#endif
#else /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov,
loff_t pos, unsigned long nr_segs)
{
if (rw == WRITE)
return (zpl_aio_write(kiocb, iov, nr_segs, pos));
else
return (zpl_aio_read(kiocb, iov, nr_segs, pos));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
const struct iovec *iovp = iov_iter_iovec(iter);
unsigned long nr_segs = iter->nr_segs;
ASSERT3S(pos, ==, kiocb->ki_pos);
if (rw == WRITE)
return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
else
return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
}
#else
#error "Unknown direct IO interface"
#endif
#endif /* HAVE_VFS_RW_ITERATE */
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
fstrans_cookie_t cookie;
if (whence == SEEK_DATA || whence == SEEK_HOLE) {
struct inode *ip = filp->f_mapping->host;
loff_t maxbytes = ip->i_sb->s_maxbytes;
loff_t error;
spl_inode_lock_shared(ip);
cookie = spl_fstrans_mark();
error = -zfs_holey(ITOZ(ip), whence, &offset);
spl_fstrans_unmark(cookie);
if (error == 0)
error = lseek_execute(filp, ip, offset, maxbytes);
spl_inode_unlock_shared(ip);
return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
return (generic_file_llseek(filp, offset, whence));
}
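/*
 * Editor's illustration (not part of this change): the SEEK_HOLE/SEEK_DATA
 * support above is what lets user space walk the holes of a sparse ZFS
 * file. The path below is hypothetical and error handling is omitted.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tank/fs/sparse-demo", O_RDONLY);
	off_t data = lseek(fd, 0, SEEK_DATA);		/* first data region */
	off_t hole = lseek(fd, data, SEEK_HOLE);	/* end of that region */

	printf("data at %lld, next hole at %lld\n",
	    (long long)data, (long long)hole);
	(void) close(fd);
	return (0);
}
#endif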
/*
* It's worth taking a moment to describe how mmap is implemented
* for zfs because it differs considerably from other Linux filesystems.
* However, this issue is handled the same way under OpenSolaris.
*
* The issue is that by design zfs bypasses the Linux page cache and
* leaves all caching up to the ARC. This has been shown to work
* well for the common read(2)/write(2) case. However, mmap(2)
* is a problem because it relies on being tightly integrated with the
* page cache. To handle this we cache mmap'ed files twice, once in
* the ARC and a second time in the page cache. The code is careful
* to keep both copies synchronized.
*
* When a file with an mmap'ed region is written to using write(2)
* both the data in the ARC and existing pages in the page cache
* are updated. For a read(2) data will be read first from the page
* cache then the ARC if needed. Neither a write(2) nor a read(2)
* will ever result in new pages being added to the page cache.
*
* New pages are added to the page cache only via .readpage() which
* is called when the vfs needs to read a page off disk to back the
* virtual memory region. These pages may be modified without
* notifying the ARC and will be written out periodically via
* .writepage(). This will occur due to either a sync or the usual
* page aging behavior. Note that because a read(2) of an mmap'ed file
* always checks the page cache first, correct data will still be
* returned even when the ARC is out of date.
*
* While this implementation ensures correct behavior it does have
* some drawbacks, the most obvious of which is that it increases the
* required memory footprint when accessing mmap'ed files. It also
* adds additional complexity to the code keeping
* both caches synchronized.
*
* Longer term it may be possible to cleanly resolve this wart by
* mapping page cache pages directly on to the ARC buffers. The
* Linux address space operations are flexible enough to allow
* selection of which pages back a particular index. The trick
* would be working out the details of which subsystem is in
* charge, the ARC, the page cache, or both. It may also prove
* helpful to move the ARC buffers to scatter-gather lists
* rather than a vmalloc'ed region.
*/
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct inode *ip = filp->f_mapping->host;
znode_t *zp = ITOZ(ip);
int error;
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
(size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
spl_fstrans_unmark(cookie);
if (error)
return (error);
error = generic_file_mmap(filp, vma);
if (error)
return (error);
mutex_enter(&zp->z_lock);
zp->z_is_mapped = B_TRUE;
mutex_exit(&zp->z_lock);
return (error);
}
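/*
 * Editor's illustration (not part of this change): a minimal user-space
 * sketch of the mmap/read coherence described above. A store through the
 * mapping dirties a page cache page; a subsequent read(2) consults the
 * page cache first, so it observes the new bytes even before writeback
 * pushes the page into the ARC. The file path below is hypothetical and
 * error handling is omitted for brevity.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tank/fs/mmap-demo", O_RDWR | O_CREAT, 0644);
	char buf[5] = { 0 };

	(void) ftruncate(fd, 4096);
	char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);

	memcpy(map, "zfs!", 4);		/* dirties the page cache copy */
	(void) pread(fd, buf, 4, 0);	/* read(2) checks the page cache first */
	printf("%s\n", buf);		/* prints "zfs!" */

	(void) munmap(map, 4096);
	(void) close(fd);
	return (0);
}
#endif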
/*
* Populate a page with data for the Linux page cache. This function is
* only used to support mmap(2). There will be an identical copy of the
* data in the ARC which is kept up to date via .write() and .writepage().
*/
static inline int
zpl_readpage_common(struct page *pp)
{
struct inode *ip;
struct page *pl[1];
int error = 0;
fstrans_cookie_t cookie;
ASSERT(PageLocked(pp));
ip = pp->mapping->host;
pl[0] = pp;
cookie = spl_fstrans_mark();
error = -zfs_getpage(ip, pl, 1);
spl_fstrans_unmark(cookie);
if (error) {
SetPageError(pp);
ClearPageUptodate(pp);
} else {
ClearPageError(pp);
SetPageUptodate(pp);
flush_dcache_page(pp);
}
unlock_page(pp);
return (error);
}
static int
zpl_readpage(struct file *filp, struct page *pp)
{
return (zpl_readpage_common(pp));
}
static int
zpl_readpage_filler(void *data, struct page *pp)
{
return (zpl_readpage_common(pp));
}
/*
* Populate a set of pages with data for the Linux page cache. This
* function will only be called for read ahead and never for demand
* paging. For simplicity, the code relies on read_cache_pages() to
* correctly lock each page for IO and call zpl_readpage().
*/
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
return (read_cache_pages(mapping, pages, zpl_readpage_filler, NULL));
}
static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
struct address_space *mapping = data;
fstrans_cookie_t cookie;
ASSERT(PageLocked(pp));
ASSERT(!PageWriteback(pp));
cookie = spl_fstrans_mark();
(void) zfs_putpage(mapping->host, pp, wbc);
spl_fstrans_unmark(cookie);
return (0);
}
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
znode_t *zp = ITOZ(mapping->host);
zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
enum writeback_sync_modes sync_mode;
int result;
ZPL_ENTER(zfsvfs);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
ZPL_EXIT(zfsvfs);
sync_mode = wbc->sync_mode;
/*
* We don't want to run write_cache_pages() in SYNC mode here, because
* that would make putpage() wait for a single page to be committed to
* disk every single time, resulting in atrocious performance. Instead
* we run it once in non-SYNC mode so that the ZIL gets all the data,
* and then we commit it all in one go.
*/
wbc->sync_mode = WB_SYNC_NONE;
result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
if (sync_mode != wbc->sync_mode) {
ZPL_ENTER(zfsvfs);
ZPL_VERIFY_ZP(zp);
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, zp->z_id);
ZPL_EXIT(zfsvfs);
/*
* We need to call write_cache_pages() again (we can't just
* return after the commit) because the previous call in
* non-SYNC mode does not guarantee that we got all the dirty
* pages (see the implementation of write_cache_pages() for
* details). That being said, this is a no-op in most cases.
*/
wbc->sync_mode = sync_mode;
result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
}
return (result);
}
/*
* Write out dirty pages to the ARC, this function is only required to
* support mmap(2). Mapped pages may be dirtied by memory operations
* which never call .write(). These dirty pages are kept in sync with
* the ARC buffers via this hook.
*/
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
return (zpl_putpage(pp, wbc, pp->mapping));
}
/*
* The flag combination which matches the behavior of zfs_space() is
* FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE. The FALLOC_FL_PUNCH_HOLE
* flag was introduced in the 2.6.38 kernel.
*
* The original mode=0 (allocate space) behavior can be reasonably emulated
* by checking if enough space exists and creating a sparse file, as real
* persistent space reservation is not possible due to COW, snapshots, etc.
*/
static long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
cred_t *cr = CRED();
loff_t olen;
fstrans_cookie_t cookie;
int error = 0;
if ((mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) != 0)
return (-EOPNOTSUPP);
if (offset < 0 || len <= 0)
return (-EINVAL);
spl_inode_lock(ip);
olen = i_size_read(ip);
crhold(cr);
cookie = spl_fstrans_mark();
if (mode & FALLOC_FL_PUNCH_HOLE) {
flock64_t bf;
if (offset > olen)
goto out_unmark;
if (offset + len > olen)
len = olen - offset;
bf.l_type = F_WRLCK;
bf.l_whence = SEEK_SET;
bf.l_start = offset;
bf.l_len = len;
bf.l_pid = 0;
error = -zfs_space(ITOZ(ip), F_FREESP, &bf, O_RDWR, offset, cr);
} else if ((mode & ~FALLOC_FL_KEEP_SIZE) == 0) {
unsigned int percent = zfs_fallocate_reserve_percent;
struct kstatfs statfs;
/* Legacy mode, disable fallocate compatibility. */
if (percent == 0) {
error = -EOPNOTSUPP;
goto out_unmark;
}
/*
* Use zfs_statvfs() instead of dmu_objset_space() since it
* also checks project quota limits, which are relevant here.
*/
error = zfs_statvfs(ip, &statfs);
if (error)
goto out_unmark;
/*
* Shrink available space a bit to account for overhead/races.
* We know the product previously fit into availbytes from
* dmu_objset_space(), so the smaller product will also fit.
*/
if (len > statfs.f_bavail * (statfs.f_bsize * 100 / percent)) {
error = -ENOSPC;
goto out_unmark;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > olen)
error = zfs_freesp(ITOZ(ip), offset + len, 0, 0, FALSE);
}
out_unmark:
spl_fstrans_unmark(cookie);
spl_inode_unlock(ip);
crfree(cr);
return (error);
}
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
return zpl_fallocate_common(file_inode(filp),
mode, offset, len);
}
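/*
 * Editor's illustration (not part of this change): the only non-trivial
 * mode combination accepted above is FALLOC_FL_PUNCH_HOLE together with
 * FALLOC_FL_KEEP_SIZE, which is routed to zfs_space()/F_FREESP. Plain
 * mode 0 is emulated with the capacity check driven by
 * zfs_fallocate_reserve_percent and, if needed, an extension of the file
 * size. The path below is hypothetical and error handling is omitted.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tank/fs/fallocate-demo", O_RDWR | O_CREAT, 0644);

	/* "Preallocate" 1 MiB; ZFS only verifies space and extends the file. */
	(void) fallocate(fd, 0, 0, 1 << 20);

	/* Punch a 64 KiB hole at offset 128 KiB without changing the size. */
	(void) fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
	    128 * 1024, 64 * 1024);

	(void) close(fd);
	return (0);
}
#endif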
#define ZFS_FL_USER_VISIBLE (FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define ZFS_FL_USER_MODIFIABLE (FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)
static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
uint32_t ioctl_flags = 0;
if (zfs_flags & ZFS_IMMUTABLE)
ioctl_flags |= FS_IMMUTABLE_FL;
if (zfs_flags & ZFS_APPENDONLY)
ioctl_flags |= FS_APPEND_FL;
if (zfs_flags & ZFS_NODUMP)
ioctl_flags |= FS_NODUMP_FL;
if (zfs_flags & ZFS_PROJINHERIT)
ioctl_flags |= ZFS_PROJINHERIT_FL;
return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}
/*
* Map zfs file z_pflags (xvattr_t) to linux file attributes. Only file
* attributes common to both Linux and Solaris are mapped.
*/
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
uint32_t flags;
int err;
flags = __zpl_ioctl_getflags(file_inode(filp));
err = copy_to_user(arg, &flags, sizeof (flags));
return (err);
}
/*
* fchange() is a helper macro to detect if we have been asked to change a
* flag. This is ugly, but the requirement that we do this is a consequence of
* how the Linux file attribute interface was designed. Another consequence is
* that concurrent modification of files suffers from a TOCTOU race. Neither
* are things we can fix without modifying the kernel-userland interface, which
* is outside of our jurisdiction.
*/
#define fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))
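/*
 * Editor's note (illustration only): fchange(f0, f1, b0, b1) is non-zero
 * exactly when bit b0 in f0 and bit b1 in f1 disagree. For example, if
 * the caller passes ioctl_flags without FS_IMMUTABLE_FL while the file's
 * zfs_flags currently has ZFS_IMMUTABLE set, then
 *
 *   fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE)
 *       == (!0 != !ZFS_IMMUTABLE) == (1 != 0) == 1
 *
 * i.e. the request changes the immutable state, which is why the check
 * below requires CAP_LINUX_IMMUTABLE.
 */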
static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
xoptattr_t *xoap;
if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
ZFS_PROJINHERIT_FL))
return (-EOPNOTSUPP);
if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
return (-EACCES);
if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
!capable(CAP_LINUX_IMMUTABLE))
return (-EPERM);
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
return (-EACCES);
xva_init(xva);
xoap = xva_getxoptattr(xva);
XVA_SET_REQ(xva, XAT_IMMUTABLE);
if (ioctl_flags & FS_IMMUTABLE_FL)
xoap->xoa_immutable = B_TRUE;
XVA_SET_REQ(xva, XAT_APPENDONLY);
if (ioctl_flags & FS_APPEND_FL)
xoap->xoa_appendonly = B_TRUE;
XVA_SET_REQ(xva, XAT_NODUMP);
if (ioctl_flags & FS_NODUMP_FL)
xoap->xoa_nodump = B_TRUE;
XVA_SET_REQ(xva, XAT_PROJINHERIT);
if (ioctl_flags & ZFS_PROJINHERIT_FL)
xoap->xoa_projinherit = B_TRUE;
return (0);
}
static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
uint32_t flags;
cred_t *cr = CRED();
xvattr_t xva;
int err;
fstrans_cookie_t cookie;
if (copy_from_user(&flags, arg, sizeof (flags)))
return (-EFAULT);
err = __zpl_ioctl_setflags(ip, flags, &xva);
if (err)
return (err);
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
return (err);
}
static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
zfsxattr_t fsx = { 0 };
struct inode *ip = file_inode(filp);
int err;
fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
fsx.fsx_projid = ITOZ(ip)->z_projid;
err = copy_to_user(arg, &fsx, sizeof (fsx));
return (err);
}
static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
zfsxattr_t fsx;
cred_t *cr = CRED();
xvattr_t xva;
xoptattr_t *xoap;
int err;
fstrans_cookie_t cookie;
if (copy_from_user(&fsx, arg, sizeof (fsx)))
return (-EFAULT);
if (!zpl_is_valid_projid(fsx.fsx_projid))
return (-EINVAL);
err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
if (err)
return (err);
xoap = xva_getxoptattr(&xva);
XVA_SET_REQ(&xva, XAT_PROJID);
xoap->xoa_projid = fsx.fsx_projid;
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
return (err);
}
static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FS_IOC_GETFLAGS:
return (zpl_ioctl_getflags(filp, (void *)arg));
case FS_IOC_SETFLAGS:
return (zpl_ioctl_setflags(filp, (void *)arg));
case ZFS_IOC_FSGETXATTR:
return (zpl_ioctl_getxattr(filp, (void *)arg));
case ZFS_IOC_FSSETXATTR:
return (zpl_ioctl_setxattr(filp, (void *)arg));
default:
return (-ENOTTY);
}
}
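/*
 * Illustrative sketch (not from the original change): these handlers are the
 * kernel side of the interface used by tools such as chattr(1)/lsattr(1).
 * A userland caller (with <sys/ioctl.h> and <linux/fs.h>) would reach
 * zpl_ioctl_getflags() roughly like this; the path is hypothetical:
 *
 *   int fd = open("/tank/fs/file", O_RDONLY);
 *   unsigned int flags;
 *   if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0 &&
 *       (flags & FS_IMMUTABLE_FL))
 *       printf("file is immutable\n");
 */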
#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FS_IOC32_GETFLAGS:
cmd = FS_IOC_GETFLAGS;
break;
case FS_IOC32_SETFLAGS:
cmd = FS_IOC_SETFLAGS;
break;
default:
return (-ENOTTY);
}
return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */
const struct address_space_operations zpl_address_space_operations = {
.readpages = zpl_readpages,
.readpage = zpl_readpage,
.writepage = zpl_writepage,
.writepages = zpl_writepages,
.direct_IO = zpl_direct_IO,
+#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
+ .set_page_dirty = __set_page_dirty_nobuffers,
+#endif
};
const struct file_operations zpl_file_operations = {
.open = zpl_open,
.release = zpl_release,
.llseek = zpl_llseek,
#ifdef HAVE_VFS_RW_ITERATE
#ifdef HAVE_NEW_SYNC_READ
.read = new_sync_read,
.write = new_sync_write,
#endif
.read_iter = zpl_iter_read,
.write_iter = zpl_iter_write,
#ifdef HAVE_VFS_IOV_ITER
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
#endif
#else
.read = do_sync_read,
.write = do_sync_write,
.aio_read = zpl_aio_read,
.aio_write = zpl_aio_write,
#endif
.mmap = zpl_mmap,
.fsync = zpl_fsync,
#ifdef HAVE_FILE_AIO_FSYNC
.aio_fsync = zpl_aio_fsync,
#endif
.fallocate = zpl_fallocate,
.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zpl_compat_ioctl,
#endif
};
const struct file_operations zpl_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
#if defined(HAVE_VFS_ITERATE_SHARED)
.iterate_shared = zpl_iterate,
#elif defined(HAVE_VFS_ITERATE)
.iterate = zpl_iterate,
#else
.readdir = zpl_readdir,
#endif
.fsync = zpl_fsync,
.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zpl_compat_ioctl,
#endif
};
/* BEGIN CSTYLED */
module_param(zfs_fallocate_reserve_percent, uint, 0644);
MODULE_PARM_DESC(zfs_fallocate_reserve_percent,
"Percentage of length to use for the available capacity check");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
index 741979f11af8..c17423426319 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
@@ -1,1146 +1,1174 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
*/
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>
unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_request_sync = 0;
unsigned int zvol_prefetch_bytes = (128 * 1024);
unsigned long zvol_max_discard_blocks = 16384;
unsigned int zvol_threads = 32;
struct zvol_state_os {
struct gendisk *zvo_disk; /* generic disk */
struct request_queue *zvo_queue; /* request queue */
dev_t zvo_dev; /* device id */
};
taskq_t *zvol_taskq;
static struct ida zvol_ida;
typedef struct zv_request_stack {
zvol_state_t *zv;
struct bio *bio;
} zv_request_t;
typedef struct zv_request_task {
zv_request_t zvr;
taskq_ent_t ent;
} zv_request_task_t;
static zv_request_task_t *
zv_request_task_create(zv_request_t zvr)
{
zv_request_task_t *task;
task = kmem_alloc(sizeof (zv_request_task_t), KM_SLEEP);
taskq_init_ent(&task->ent);
task->zvr = zvr;
return (task);
}
static void
zv_request_task_free(zv_request_task_t *task)
{
kmem_free(task, sizeof (*task));
}
/*
* Given a path, return TRUE if path is a ZVOL.
*/
static boolean_t
zvol_is_zvol_impl(const char *path)
{
dev_t dev = 0;
if (vdev_lookup_bdev(path, &dev) != 0)
return (B_FALSE);
if (MAJOR(dev) == zvol_major)
return (B_TRUE);
return (B_FALSE);
}
static void
zvol_write(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
int error = 0;
zfs_uio_t uio;
zfs_uio_bvec_init(&uio, bio);
zvol_state_t *zv = zvr->zv;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
ASSERT3P(zv->zv_zilog, !=, NULL);
/* A bio marked as FLUSH needs to flush before the write. */
if (bio_is_flush(bio))
zil_commit(zv->zv_zilog, ZVOL_OBJ);
/* Some requests are just for flush and nothing else. */
if (uio.uio_resid == 0) {
rw_exit(&zv->zv_suspend_lock);
BIO_END_IO(bio, 0);
return;
}
struct request_queue *q = zv->zv_zso->zvo_queue;
struct gendisk *disk = zv->zv_zso->zvo_disk;
ssize_t start_resid = uio.uio_resid;
unsigned long start_time;
boolean_t acct = blk_queue_io_stat(q);
if (acct)
start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);
boolean_t sync =
bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
uio.uio_loffset, uio.uio_resid, RL_WRITER);
uint64_t volsize = zv->zv_volsize;
while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
uint64_t off = uio.uio_loffset;
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
if (bytes > volsize - off) /* don't write past the end */
bytes = volsize - off;
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
/* This will only fail for ENOSPC */
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
break;
}
error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
if (error == 0) {
zvol_log_write(zv, tx, off, bytes, sync);
}
dmu_tx_commit(tx);
if (error)
break;
}
zfs_rangelock_exit(lr);
int64_t nwritten = start_resid - uio.uio_resid;
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
task_io_account_write(nwritten);
if (sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
if (acct)
blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
BIO_END_IO(bio, -error);
}
static void
zvol_write_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_write(&task->zvr);
zv_request_task_free(task);
}
static void
zvol_discard(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
zvol_state_t *zv = zvr->zv;
uint64_t start = BIO_BI_SECTOR(bio) << 9;
uint64_t size = BIO_BI_SIZE(bio);
uint64_t end = start + size;
boolean_t sync;
int error = 0;
dmu_tx_t *tx;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
ASSERT3P(zv->zv_zilog, !=, NULL);
struct request_queue *q = zv->zv_zso->zvo_queue;
struct gendisk *disk = zv->zv_zso->zvo_disk;
unsigned long start_time;
boolean_t acct = blk_queue_io_stat(q);
if (acct)
start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);
sync = bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
if (end > zv->zv_volsize) {
error = SET_ERROR(EIO);
goto unlock;
}
/*
* Align the request to volume block boundaries when a secure erase is
* not required. This prevents dnode_free_range() from zeroing out
* the unaligned parts, which is slow (read-modify-write) and pointless
* since we are not freeing any space by doing so.
*/
if (!bio_is_secure_erase(bio)) {
start = P2ROUNDUP(start, zv->zv_volblocksize);
end = P2ALIGN(end, zv->zv_volblocksize);
size = end - start;
}
if (start >= end)
goto unlock;
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
start, size, RL_WRITER);
tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zvol_log_truncate(zv, tx, start, size, B_TRUE);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset,
ZVOL_OBJ, start, size);
}
zfs_rangelock_exit(lr);
if (error == 0 && sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
unlock:
rw_exit(&zv->zv_suspend_lock);
if (acct)
blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
BIO_END_IO(bio, -error);
}
static void
zvol_discard_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_discard(&task->zvr);
zv_request_task_free(task);
}
static void
zvol_read(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
int error = 0;
zfs_uio_t uio;
zfs_uio_bvec_init(&uio, bio);
zvol_state_t *zv = zvr->zv;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
struct request_queue *q = zv->zv_zso->zvo_queue;
struct gendisk *disk = zv->zv_zso->zvo_disk;
ssize_t start_resid = uio.uio_resid;
unsigned long start_time;
boolean_t acct = blk_queue_io_stat(q);
if (acct)
start_time = blk_generic_start_io_acct(q, disk, READ, bio);
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
uio.uio_loffset, uio.uio_resid, RL_READER);
uint64_t volsize = zv->zv_volsize;
while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
/* don't read past the end */
if (bytes > volsize - uio.uio_loffset)
bytes = volsize - uio.uio_loffset;
error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
}
zfs_rangelock_exit(lr);
int64_t nread = start_resid - uio.uio_resid;
dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
task_io_account_read(nread);
rw_exit(&zv->zv_suspend_lock);
if (acct)
blk_generic_end_io_acct(q, disk, READ, bio, start_time);
BIO_END_IO(bio, -error);
}
static void
zvol_read_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_read(&task->zvr);
zv_request_task_free(task);
}
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static blk_qc_t
zvol_submit_bio(struct bio *bio)
#else
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
#endif
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#if defined(HAVE_BIO_BDEV_DISK)
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
struct request_queue *q = bio->bi_disk->queue;
#endif
#endif
zvol_state_t *zv = q->queuedata;
fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = BIO_BI_SECTOR(bio) << 9;
uint64_t size = BIO_BI_SIZE(bio);
int rw = bio_data_dir(bio);
if (bio_has_data(bio) && offset + size > zv->zv_volsize) {
printk(KERN_INFO
"%s: bad access: offset=%llu, size=%lu\n",
zv->zv_zso->zvo_disk->disk_name,
(long long unsigned)offset,
(long unsigned)size);
BIO_END_IO(bio, -SET_ERROR(EIO));
goto out;
}
zv_request_t zvr = {
.zv = zv,
.bio = bio,
};
zv_request_task_t *task;
if (rw == WRITE) {
if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
BIO_END_IO(bio, -SET_ERROR(EROFS));
goto out;
}
/*
* Prevents the zvol from being suspended, or the ZIL being
* concurrently opened. Will be released after the i/o
* completes.
*/
rw_enter(&zv->zv_suspend_lock, RW_READER);
/*
* Open a ZIL if this is the first time we have written to this
* zvol. We protect zv->zv_zilog with zv_suspend_lock rather
* than zv_state_lock so that we don't need to acquire an
* additional lock in this path.
*/
if (zv->zv_zilog == NULL) {
rw_exit(&zv->zv_suspend_lock);
rw_enter(&zv->zv_suspend_lock, RW_WRITER);
if (zv->zv_zilog == NULL) {
zv->zv_zilog = zil_open(zv->zv_objset,
zvol_get_data);
zv->zv_flags |= ZVOL_WRITTEN_TO;
/* replay / destroy done in zvol_create_minor */
VERIFY0((zv->zv_zilog->zl_header->zh_flags &
ZIL_REPLAY_NEEDED));
}
rw_downgrade(&zv->zv_suspend_lock);
}
/*
* We don't want this thread to be blocked waiting for i/o to
* complete, so we instead wait from a taskq callback. The
* i/o may be a ZIL write (via zil_commit()), or a read of an
* indirect block, or a read of a data block (if this is a
* partial-block write). We will indicate that the i/o is
* complete by calling BIO_END_IO() from the taskq callback.
*
* This design allows the calling thread to continue and
* initiate more concurrent operations by calling
* zvol_request() again. There are typically only a small
* number of threads available to call zvol_request() (e.g.
* one per iSCSI target), so keeping the latency of
* zvol_request() low is important for performance.
*
* The zvol_request_sync module parameter allows this
* behavior to be altered, for performance evaluation
* purposes. If the callback blocks, setting
* zvol_request_sync=1 will result in much worse performance.
*
* We can have up to zvol_threads concurrent i/o's being
* processed for all zvols on the system. This is typically
* a vast improvement over the zvol_request_sync=1 behavior
* of one i/o at a time per zvol. However, an even better
* design would be for zvol_request() to initiate the zio
* directly, and then be notified by the zio_done callback,
* which would call BIO_END_IO(). Unfortunately, the DMU/ZIL
* interfaces lack this functionality (they block waiting for
* the i/o to complete).
*/
if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
if (zvol_request_sync) {
zvol_discard(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(zvol_taskq,
zvol_discard_task, task, 0, &task->ent);
}
} else {
if (zvol_request_sync) {
zvol_write(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(zvol_taskq,
zvol_write_task, task, 0, &task->ent);
}
}
} else {
/*
* The SCST driver, and possibly others, may issue READ I/Os
* with a length of zero bytes. These empty I/Os contain no
* data and require no additional handling.
*/
if (size == 0) {
BIO_END_IO(bio, 0);
goto out;
}
rw_enter(&zv->zv_suspend_lock, RW_READER);
/* See comment in WRITE case above. */
if (zvol_request_sync) {
zvol_read(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(zvol_taskq,
zvol_read_task, task, 0, &task->ent);
}
}
out:
spl_fstrans_unmark(cookie);
#if defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)
return (BLK_QC_T_NONE);
#endif
}
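/*
 * Illustrative note (not part of the original change): the dispatch policy
 * above can be inspected and, for performance evaluation only, forced to the
 * synchronous path through the module parameter exported at the end of this
 * file, e.g. (assuming the usual sysfs layout for the zfs module):
 *
 *   # cat /sys/module/zfs/parameters/zvol_request_sync
 *   0
 *   # echo 1 > /sys/module/zfs/parameters/zvol_request_sync
 */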
static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
zvol_state_t *zv;
int error = 0;
boolean_t drop_suspend = B_TRUE;
rw_enter(&zvol_state_lock, RW_READER);
/*
* Obtain a copy of private_data under the zvol_state_lock to make
* sure that either the result of the zvol free code path setting
* bdev->bd_disk->private_data to NULL is observed, or zvol_free()
* is not called on this zv because of the positive zv_open_count.
*/
zv = bdev->bd_disk->private_data;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
return (SET_ERROR(-ENXIO));
}
mutex_enter(&zv->zv_state_lock);
/*
* make sure zvol is not suspended during first open
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if (zv->zv_open_count == 0) {
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
error = -zvol_first_open(zv, !(flag & FMODE_WRITE));
if (error)
goto out_mutex;
}
if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
error = -EROFS;
goto out_open_count;
}
zv->zv_open_count++;
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
zfs_check_media_change(bdev);
return (0);
out_open_count:
if (zv->zv_open_count == 0)
zvol_last_close(zv);
out_mutex:
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
if (error == -EINTR) {
error = -ERESTARTSYS;
schedule();
}
return (SET_ERROR(error));
}
static void
zvol_release(struct gendisk *disk, fmode_t mode)
{
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
rw_enter(&zvol_state_lock, RW_READER);
zv = disk->private_data;
mutex_enter(&zv->zv_state_lock);
ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 1) {
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
zv->zv_open_count--;
if (zv->zv_open_count == 0) {
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
}
static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
zvol_state_t *zv = bdev->bd_disk->private_data;
int error = 0;
ASSERT3U(zv->zv_open_count, >, 0);
switch (cmd) {
case BLKFLSBUF:
fsync_bdev(bdev);
invalidate_bdev(bdev);
rw_enter(&zv->zv_suspend_lock, RW_READER);
if (!(zv->zv_flags & ZVOL_RDONLY))
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
rw_exit(&zv->zv_suspend_lock);
break;
case BLKZNAME:
mutex_enter(&zv->zv_state_lock);
error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
mutex_exit(&zv->zv_state_lock);
break;
default:
error = -ENOTTY;
break;
}
return (SET_ERROR(error));
}
#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define zvol_compat_ioctl NULL
#endif
static unsigned int
zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
unsigned int mask = 0;
rw_enter(&zvol_state_lock, RW_READER);
zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
zv->zv_changed = 0;
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (mask);
}
static int
zvol_revalidate_disk(struct gendisk *disk)
{
rw_enter(&zvol_state_lock, RW_READER);
zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
set_capacity(zv->zv_zso->zvo_disk,
zv->zv_volsize >> SECTOR_BITS);
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (0);
}
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
struct gendisk *disk = zv->zv_zso->zvo_disk;
#if defined(HAVE_REVALIDATE_DISK_SIZE)
revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
#elif defined(HAVE_REVALIDATE_DISK)
revalidate_disk(disk);
#else
zvol_revalidate_disk(disk);
#endif
return (0);
}
static void
zvol_clear_private(zvol_state_t *zv)
{
/*
* Cleared while holding zvol_state_lock as a writer
* which will prevent zvol_open() from opening it.
*/
zv->zv_zso->zvo_disk->private_data = NULL;
}
/*
* Provide a simple virtual geometry for legacy compatibility. For devices
* smaller than 1 MiB a small head and sector count is used to allow very
* tiny devices. For devices over 1 MiB a standard head and sector count
* is used to keep the cylinder count reasonable.
*/
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
zvol_state_t *zv = bdev->bd_disk->private_data;
sector_t sectors;
ASSERT3U(zv->zv_open_count, >, 0);
sectors = get_capacity(zv->zv_zso->zvo_disk);
if (sectors > 2048) {
geo->heads = 16;
geo->sectors = 63;
} else {
geo->heads = 2;
geo->sectors = 4;
}
geo->start = 0;
geo->cylinders = sectors / (geo->heads * geo->sectors);
return (0);
}
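/*
 * Worked example (illustrative only): for a 1 GiB zvol, get_capacity()
 * reports 2097152 512-byte sectors. Since 2097152 > 2048 the standard
 * geometry is used, giving heads = 16, sectors = 63 and
 * cylinders = 2097152 / (16 * 63) = 2080.
 */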
static struct block_device_operations zvol_ops = {
.open = zvol_open,
.release = zvol_release,
.ioctl = zvol_ioctl,
.compat_ioctl = zvol_compat_ioctl,
.check_events = zvol_check_events,
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
.revalidate_disk = zvol_revalidate_disk,
#endif
.getgeo = zvol_getgeo,
.owner = THIS_MODULE,
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
- .submit_bio = zvol_submit_bio,
+ .submit_bio = zvol_submit_bio,
#endif
};
/*
* Allocate memory for a new zvol_state_t and set up the required
* request queue and generic disk structures for the block device.
*/
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
zvol_state_t *zv;
struct zvol_state_os *zso;
uint64_t volmode;
if (dsl_prop_get_integer(name, "volmode", &volmode, NULL) != 0)
return (NULL);
if (volmode == ZFS_VOLMODE_DEFAULT)
volmode = zvol_volmode;
if (volmode == ZFS_VOLMODE_NONE)
return (NULL);
zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
zv->zv_zso = zso;
zv->zv_volmode = volmode;
list_link_init(&zv->zv_next);
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
+#ifdef HAVE_BLK_ALLOC_DISK
+ zso->zvo_disk = blk_alloc_disk(NUMA_NO_NODE);
+ if (zso->zvo_disk == NULL)
+ goto out_kmem;
+
+ zso->zvo_disk->minors = ZVOL_MINORS;
+ zso->zvo_queue = zso->zvo_disk->queue;
+#else
zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
+ if (zso->zvo_queue == NULL)
+ goto out_kmem;
+
+ zso->zvo_disk = alloc_disk(ZVOL_MINORS);
+ if (zso->zvo_disk == NULL) {
+ blk_cleanup_queue(zso->zvo_queue);
+ goto out_kmem;
+ }
+
+ zso->zvo_disk->queue = zso->zvo_queue;
+#endif /* HAVE_BLK_ALLOC_DISK */
#else
zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
-#endif
if (zso->zvo_queue == NULL)
goto out_kmem;
+ zso->zvo_disk = alloc_disk(ZVOL_MINORS);
+ if (zso->zvo_disk == NULL) {
+ blk_cleanup_queue(zso->zvo_queue);
+ goto out_kmem;
+ }
+
+ zso->zvo_disk->queue = zso->zvo_queue;
+#endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
+
blk_queue_set_write_cache(zso->zvo_queue, B_TRUE, B_TRUE);
/* Limit read-ahead to a single page to prevent over-prefetching. */
blk_queue_set_read_ahead(zso->zvo_queue, 1);
/* Disable write merging in favor of the ZIO pipeline. */
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);
- zso->zvo_disk = alloc_disk(ZVOL_MINORS);
- if (zso->zvo_disk == NULL)
- goto out_queue;
+ /* Enable /proc/diskstats */
+ blk_queue_flag_set(QUEUE_FLAG_IO_STAT, zso->zvo_queue);
zso->zvo_queue->queuedata = zv;
zso->zvo_dev = dev;
zv->zv_open_count = 0;
strlcpy(zv->zv_name, name, MAXNAMELEN);
zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
zso->zvo_disk->major = zvol_major;
zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;
if (volmode == ZFS_VOLMODE_DEV) {
/*
* ZFS_VOLMODE_DEV disables partitioning on ZVOL devices: set
* gendisk->minors = 1 as noted in include/linux/genhd.h.
* Also disable extended partition numbers (GENHD_FL_EXT_DEVT)
* and suppress partition scanning (GENHD_FL_NO_PART_SCAN) by
* setting gendisk->flags accordingly.
*/
zso->zvo_disk->minors = 1;
#if defined(GENHD_FL_EXT_DEVT)
zso->zvo_disk->flags &= ~GENHD_FL_EXT_DEVT;
#endif
#if defined(GENHD_FL_NO_PART_SCAN)
zso->zvo_disk->flags |= GENHD_FL_NO_PART_SCAN;
#endif
}
zso->zvo_disk->first_minor = (dev & MINORMASK);
zso->zvo_disk->fops = &zvol_ops;
zso->zvo_disk->private_data = zv;
- zso->zvo_disk->queue = zso->zvo_queue;
snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
ZVOL_DEV_NAME, (dev & MINORMASK));
return (zv);
-out_queue:
- blk_cleanup_queue(zso->zvo_queue);
out_kmem:
kmem_free(zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
return (NULL);
}
/*
* Cleanup then free a zvol_state_t which was created by zvol_alloc().
* At this time, the structure is not opened by anyone, is taken off
* the zvol_state_list, and has its private data set to NULL.
* The zvol_state_lock is dropped.
*
* This function may take many milliseconds to complete (e.g. we've seen
* it take over 256ms), due to the calls to "blk_cleanup_queue" and
* "del_gendisk". Thus, consumers need to be careful to account for this
* latency when calling this function.
*/
static void
zvol_free(zvol_state_t *zv)
{
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
del_gendisk(zv->zv_zso->zvo_disk);
+#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
+ defined(HAVE_BLK_ALLOC_DISK)
+ blk_cleanup_disk(zv->zv_zso->zvo_disk);
+#else
blk_cleanup_queue(zv->zv_zso->zvo_queue);
put_disk(zv->zv_zso->zvo_disk);
+#endif
ida_simple_remove(&zvol_ida,
MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
mutex_destroy(&zv->zv_state_lock);
dataset_kstats_destroy(&zv->zv_kstat);
kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
}
void
zvol_wait_close(zvol_state_t *zv)
{
}
/*
* Create a block device minor node and setup the linkage between it
* and the specified volume. Once this function returns the block
* device is live and ready for use.
*/
static int
zvol_os_create_minor(const char *name)
{
zvol_state_t *zv;
objset_t *os;
dmu_object_info_t *doi;
uint64_t volsize;
uint64_t len;
unsigned minor = 0;
int error = 0;
int idx;
uint64_t hash = zvol_name_hash(name);
if (zvol_inhibit_dev)
return (0);
idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
if (idx < 0)
return (SET_ERROR(-idx));
minor = idx << ZVOL_MINOR_BITS;
zv = zvol_find_by_name_hash(name, hash, RW_NONE);
if (zv) {
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
mutex_exit(&zv->zv_state_lock);
ida_simple_remove(&zvol_ida, idx);
return (SET_ERROR(EEXIST));
}
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
if (error)
goto out_doi;
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error)
goto out_dmu_objset_disown;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_dmu_objset_disown;
zv = zvol_alloc(MKDEV(zvol_major, minor), name);
if (zv == NULL) {
error = SET_ERROR(EAGAIN);
goto out_dmu_objset_disown;
}
zv->zv_hash = hash;
if (dmu_objset_is_snapshot(os))
zv->zv_flags |= ZVOL_RDONLY;
zv->zv_volblocksize = doi->doi_data_block_size;
zv->zv_volsize = volsize;
zv->zv_objset = os;
set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);
blk_queue_max_hw_sectors(zv->zv_zso->zvo_queue,
(DMU_MAX_ACCESS / 4) >> 9);
blk_queue_max_segments(zv->zv_zso->zvo_queue, UINT16_MAX);
blk_queue_max_segment_size(zv->zv_zso->zvo_queue, UINT_MAX);
blk_queue_physical_block_size(zv->zv_zso->zvo_queue,
zv->zv_volblocksize);
blk_queue_io_opt(zv->zv_zso->zvo_queue, zv->zv_volblocksize);
blk_queue_max_discard_sectors(zv->zv_zso->zvo_queue,
(zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
blk_queue_discard_granularity(zv->zv_zso->zvo_queue,
zv->zv_volblocksize);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
#ifdef QUEUE_FLAG_NONROT
blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
#endif
#ifdef QUEUE_FLAG_ADD_RANDOM
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
#endif
/* This flag was introduced in kernel version 4.12. */
#ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
#endif
ASSERT3P(zv->zv_zilog, ==, NULL);
zv->zv_zilog = zil_open(os, zvol_get_data);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
zil_destroy(zv->zv_zilog, B_FALSE);
else
zil_replay(os, zv, zvol_replay_vector);
}
zil_close(zv->zv_zilog);
zv->zv_zilog = NULL;
ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
/*
* When udev detects the addition of the device it will immediately
* invoke blkid(8) to determine the type of content on the device.
* Prefetching the blocks commonly scanned by blkid(8) will speed
* up this process.
*/
len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
if (len > 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
ZIO_PRIORITY_SYNC_READ);
}
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, B_TRUE, FTAG);
out_doi:
kmem_free(doi, sizeof (dmu_object_info_t));
/*
* Keep in mind that once add_disk() is called, the zvol is
* announced to the world, and zvol_open()/zvol_release() can
* be called at any time. Incidentally, add_disk() itself calls
* zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
* directly as well.
*/
if (error == 0) {
rw_enter(&zvol_state_lock, RW_WRITER);
zvol_insert(zv);
rw_exit(&zvol_state_lock);
add_disk(zv->zv_zso->zvo_disk);
} else {
ida_simple_remove(&zvol_ida, idx);
}
return (error);
}
static void
zvol_rename_minor(zvol_state_t *zv, const char *newname)
{
int readonly = get_disk_ro(zv->zv_zso->zvo_disk);
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
/* move to new hashtable entry */
zv->zv_hash = zvol_name_hash(zv->zv_name);
hlist_del(&zv->zv_hlink);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
/*
* The block device's read-only state is briefly changed causing
* a KOBJ_CHANGE uevent to be issued. This ensures udev detects
* the name change and fixes the symlinks. This does not change
* ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
* changes. This would normally be done using kobject_uevent() but
* that is a GPL-only symbol which is why we need this workaround.
*/
set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
set_disk_ro(zv->zv_zso->zvo_disk, readonly);
}
static void
zvol_set_disk_ro_impl(zvol_state_t *zv, int flags)
{
set_disk_ro(zv->zv_zso->zvo_disk, flags);
}
static void
zvol_set_capacity_impl(zvol_state_t *zv, uint64_t capacity)
{
set_capacity(zv->zv_zso->zvo_disk, capacity);
}
const static zvol_platform_ops_t zvol_linux_ops = {
.zv_free = zvol_free,
.zv_rename_minor = zvol_rename_minor,
.zv_create_minor = zvol_os_create_minor,
.zv_update_volsize = zvol_update_volsize,
.zv_clear_private = zvol_clear_private,
.zv_is_zvol = zvol_is_zvol_impl,
.zv_set_disk_ro = zvol_set_disk_ro_impl,
.zv_set_capacity = zvol_set_capacity_impl,
};
int
zvol_init(void)
{
int error;
int threads = MIN(MAX(zvol_threads, 1), 1024);
error = register_blkdev(zvol_major, ZVOL_DRIVER);
if (error) {
printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
return (error);
}
zvol_taskq = taskq_create(ZVOL_DRIVER, threads, maxclsyspri,
threads * 2, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
if (zvol_taskq == NULL) {
unregister_blkdev(zvol_major, ZVOL_DRIVER);
return (-ENOMEM);
}
zvol_init_impl();
ida_init(&zvol_ida);
zvol_register_ops(&zvol_linux_ops);
return (0);
}
void
zvol_fini(void)
{
zvol_fini_impl();
unregister_blkdev(zvol_major, ZVOL_DRIVER);
taskq_destroy(zvol_taskq);
ida_destroy(&zvol_ida);
}
/* BEGIN CSTYLED */
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Max number of threads to handle I/O requests");
module_param(zvol_request_sync, uint, 0644);
MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");
module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
module_param(zvol_prefetch_bytes, uint, 0644);
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
module_param(zvol_volmode, uint, 0644);
MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/abd.c b/sys/contrib/openzfs/module/zfs/abd.c
index cc2d3575db63..f306c7a1dcca 100644
--- a/sys/contrib/openzfs/module/zfs/abd.c
+++ b/sys/contrib/openzfs/module/zfs/abd.c
@@ -1,1216 +1,1216 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2019 by Delphix. All rights reserved.
*/
/*
* ARC buffer data (ABD).
*
* ABDs are an abstract data structure for the ARC which can use two
* different ways of storing the underlying data:
*
* (a) Linear buffer. In this case, all the data in the ABD is stored in one
* contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
*
* +-------------------+
* | ABD (linear) |
* | abd_flags = ... |
* | abd_size = ... | +--------------------------------+
* | abd_buf ------------->| raw buffer of size abd_size |
* +-------------------+ +--------------------------------+
* no abd_chunks
*
* (b) Scattered buffer. In this case, the data in the ABD is split into
* equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
* to the chunks recorded in an array at the end of the ABD structure.
*
* +-------------------+
* | ABD (scattered) |
* | abd_flags = ... |
* | abd_size = ... |
* | abd_offset = 0 | +-----------+
* | abd_chunks[0] ----------------------------->| chunk 0 |
* | abd_chunks[1] ---------------------+ +-----------+
* | ... | | +-----------+
* | abd_chunks[N-1] ---------+ +------->| chunk 1 |
* +-------------------+ | +-----------+
* | ...
* | +-----------+
* +----------------->| chunk N-1 |
* +-----------+
*
* In addition to directly allocating a linear or scattered ABD, it is also
* possible to create an ABD by requesting the "sub-ABD" starting at an offset
* within an existing ABD. In linear buffers this is simple (set abd_buf of
* the new ABD to the starting point within the original raw buffer), but
* scattered ABDs are a little more complex. The new ABD makes a copy of the
* relevant abd_chunks pointers (but not the underlying data). However, to
* provide arbitrary rather than only chunk-aligned starting offsets, it also
* tracks an abd_offset field which represents the starting point of the data
* within the first chunk in abd_chunks. For both linear and scattered ABDs,
* creating an offset ABD marks the original ABD as the offset's parent, and the
* original ABD's abd_children refcount is incremented. This data allows us to
* ensure the root ABD isn't deleted before its children.
*
* Most consumers should never need to know what type of ABD they're using --
* the ABD public API ensures that it's possible to transparently switch from
* using a linear ABD to a scattered one when doing so would be beneficial.
*
* If you need to use the data within an ABD directly, if you know it's linear
* (because you allocated it) you can use abd_to_buf() to access the underlying
* raw buffer. Otherwise, you should use one of the abd_borrow_buf* functions
* which will allocate a raw buffer if necessary. Use the abd_return_buf*
* functions to return any raw buffers that are no longer necessary when you're
* done using them.
*
* There are a variety of ABD APIs that implement basic buffer operations:
* compare, copy, read, write, and fill with zeroes. If you need a custom
* function which progressively accesses the whole ABD, use the abd_iterate_*
* functions.
*
* As an additional feature, linear and scatter ABDs can be stitched together
* by using the gang ABD type (abd_alloc_gang()). This allows for
* multiple ABDs to be viewed as a single ABD.
*
* It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
* B_FALSE.
*/
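/*
 * Illustrative usage sketch (not part of the original file): a consumer that
 * does not care about the underlying layout typically allocates, fills and
 * frees an ABD like this (src_buf is a hypothetical caller-owned buffer of
 * at least that size):
 *
 *   abd_t *abd = abd_alloc(SPA_MINBLOCKSIZE, B_FALSE);
 *   abd_copy_from_buf(abd, src_buf, SPA_MINBLOCKSIZE);
 *   ...
 *   void *tmp = abd_borrow_buf_copy(abd, SPA_MINBLOCKSIZE);
 *   ...inspect or modify tmp...
 *   abd_return_buf_copy(abd, tmp, SPA_MINBLOCKSIZE);
 *   abd_free(abd);
 */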
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;
void
abd_verify(abd_t *abd)
{
#ifdef ZFS_DEBUG
ASSERT3U(abd->abd_size, >, 0);
ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_GANG |
ABD_FLAG_GANG_FREE | ABD_FLAG_ZEROS | ABD_FLAG_ALLOCD));
IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
if (abd_is_linear(abd)) {
ASSERT3P(ABD_LINEAR_BUF(abd), !=, NULL);
} else if (abd_is_gang(abd)) {
uint_t child_sizes = 0;
for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
cabd != NULL;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
ASSERT(list_link_active(&cabd->abd_gang_link));
child_sizes += cabd->abd_size;
abd_verify(cabd);
}
ASSERT3U(abd->abd_size, ==, child_sizes);
} else {
abd_verify_scatter(abd);
}
#endif
}
static void
abd_init_struct(abd_t *abd)
{
list_link_init(&abd->abd_gang_link);
mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
abd->abd_flags = 0;
#ifdef ZFS_DEBUG
zfs_refcount_create(&abd->abd_children);
abd->abd_parent = NULL;
#endif
abd->abd_size = 0;
}
static void
abd_fini_struct(abd_t *abd)
{
mutex_destroy(&abd->abd_mtx);
ASSERT(!list_link_active(&abd->abd_gang_link));
#ifdef ZFS_DEBUG
zfs_refcount_destroy(&abd->abd_children);
#endif
}
abd_t *
abd_alloc_struct(size_t size)
{
abd_t *abd = abd_alloc_struct_impl(size);
abd_init_struct(abd);
abd->abd_flags |= ABD_FLAG_ALLOCD;
return (abd);
}
void
abd_free_struct(abd_t *abd)
{
abd_fini_struct(abd);
abd_free_struct_impl(abd);
}
/*
* Allocate an ABD, along with its own underlying data buffers. Use this if you
* don't care whether the ABD is linear or not.
*/
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
- if (!zfs_abd_scatter_enabled || abd_size_alloc_linear(size))
+ if (abd_size_alloc_linear(size))
return (abd_alloc_linear(size, is_metadata));
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
abd_t *abd = abd_alloc_struct(size);
abd->abd_flags |= ABD_FLAG_OWNER;
abd->abd_u.abd_scatter.abd_offset = 0;
abd_alloc_chunks(abd, size);
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd->abd_size = size;
abd_update_scatter_stats(abd, ABDSTAT_INCR);
return (abd);
}
/*
* Allocate an ABD that must be linear, along with its own underlying data
* buffer. Only use this when it would be very annoying to write your ABD
* consumer with a scattered ABD.
*/
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
abd_t *abd = abd_alloc_struct(0);
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
abd->abd_flags |= ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd->abd_size = size;
if (is_metadata) {
ABD_LINEAR_BUF(abd) = zio_buf_alloc(size);
} else {
ABD_LINEAR_BUF(abd) = zio_data_buf_alloc(size);
}
abd_update_linear_stats(abd, ABDSTAT_INCR);
return (abd);
}
static void
abd_free_linear(abd_t *abd)
{
if (abd_is_linear_page(abd)) {
abd_free_linear_page(abd);
return;
}
if (abd->abd_flags & ABD_FLAG_META) {
zio_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
} else {
zio_data_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
}
abd_update_linear_stats(abd, ABDSTAT_DECR);
}
static void
abd_free_gang(abd_t *abd)
{
ASSERT(abd_is_gang(abd));
abd_t *cabd;
while ((cabd = list_head(&ABD_GANG(abd).abd_gang_chain)) != NULL) {
/*
* We must acquire the child ABD's mutex to ensure that if it
* is being added to another gang ABD we will set the link
* as inactive when removing it from this gang ABD and before
* adding it to the other gang ABD.
*/
mutex_enter(&cabd->abd_mtx);
ASSERT(list_link_active(&cabd->abd_gang_link));
list_remove(&ABD_GANG(abd).abd_gang_chain, cabd);
mutex_exit(&cabd->abd_mtx);
if (cabd->abd_flags & ABD_FLAG_GANG_FREE)
abd_free(cabd);
}
list_destroy(&ABD_GANG(abd).abd_gang_chain);
}
static void
abd_free_scatter(abd_t *abd)
{
abd_free_chunks(abd);
abd_update_scatter_stats(abd, ABDSTAT_DECR);
}
/*
* Free an ABD. Use with any kind of abd: those created with abd_alloc_*()
* and abd_get_*(), including abd_get_offset_struct().
*
* If the ABD was created with abd_alloc_*(), the underlying data
* (scatterlist or linear buffer) will also be freed. (Subject to ownership
* changes via abd_*_ownership_of_buf().)
*
* Unless the ABD was created with abd_get_offset_struct(), the abd_t will
* also be freed.
*/
void
abd_free(abd_t *abd)
{
if (abd == NULL)
return;
abd_verify(abd);
#ifdef ZFS_DEBUG
IMPLY(abd->abd_flags & ABD_FLAG_OWNER, abd->abd_parent == NULL);
#endif
if (abd_is_gang(abd)) {
abd_free_gang(abd);
} else if (abd_is_linear(abd)) {
if (abd->abd_flags & ABD_FLAG_OWNER)
abd_free_linear(abd);
} else {
if (abd->abd_flags & ABD_FLAG_OWNER)
abd_free_scatter(abd);
}
#ifdef ZFS_DEBUG
if (abd->abd_parent != NULL) {
(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
abd->abd_size, abd);
}
#endif
abd_fini_struct(abd);
if (abd->abd_flags & ABD_FLAG_ALLOCD)
abd_free_struct_impl(abd);
}
/*
* Allocate an ABD of the same format (same metadata flag, same scatterize
* setting) as another ABD.
*/
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
if (abd_is_linear(sabd) &&
!abd_is_linear_page(sabd)) {
return (abd_alloc_linear(size, is_metadata));
} else {
return (abd_alloc(size, is_metadata));
}
}
/*
* Create a gang ABD that will be the head of a list of ABDs. This is used
* to "chain" scatter/gather lists together when constructing aggregated
* I/Os. To free this ABD, abd_free() must be called.
*/
abd_t *
abd_alloc_gang(void)
{
abd_t *abd = abd_alloc_struct(0);
abd->abd_flags |= ABD_FLAG_GANG | ABD_FLAG_OWNER;
list_create(&ABD_GANG(abd).abd_gang_chain,
sizeof (abd_t), offsetof(abd_t, abd_gang_link));
return (abd);
}
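/*
 * Illustrative sketch (not part of the original file): stitching two
 * existing ABDs into one logical buffer with a gang ABD. free_on_free is
 * B_FALSE here, so the children remain owned by their original allocators.
 * hdr_abd and data_abd are hypothetical child ABDs:
 *
 *   abd_t *gang = abd_alloc_gang();
 *   abd_gang_add(gang, hdr_abd, B_FALSE);
 *   abd_gang_add(gang, data_abd, B_FALSE);
 *   ...gang->abd_size == hdr_abd->abd_size + data_abd->abd_size...
 *   abd_free(gang);
 */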
/*
* Add a child gang ABD to a parent gang ABD's chained list.
*/
static void
abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
ASSERT(abd_is_gang(pabd));
ASSERT(abd_is_gang(cabd));
if (free_on_free) {
/*
* If the parent is responsible for freeing the child gang
* ABD we will just splice the child's children ABD list to
* the parent's list and immediately free the child gang ABD
* struct. The parent gang ABD's children from the child gang
* will retain all of their free_on_free settings after being
* added to the parent's list.
*/
pabd->abd_size += cabd->abd_size;
list_move_tail(&ABD_GANG(pabd).abd_gang_chain,
&ABD_GANG(cabd).abd_gang_chain);
ASSERT(list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
abd_verify(pabd);
abd_free(cabd);
} else {
for (abd_t *child = list_head(&ABD_GANG(cabd).abd_gang_chain);
child != NULL;
child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) {
/*
* We always pass B_FALSE for free_on_free as it is the
* original child gang ABD's responsibility to determine
* if any of its child ABDs should be freed on the call
* to abd_free().
*/
abd_gang_add(pabd, child, B_FALSE);
}
abd_verify(pabd);
}
}
/*
* Add a child ABD to a gang ABD's chained list.
*/
void
abd_gang_add(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
ASSERT(abd_is_gang(pabd));
abd_t *child_abd = NULL;
/*
* If the child being added is a gang ABD, we will add the
* child's ABDs to the parent gang ABD. This allows us to account
* for the offset correctly in the parent gang ABD.
*/
if (abd_is_gang(cabd)) {
ASSERT(!list_link_active(&cabd->abd_gang_link));
ASSERT(!list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
return (abd_gang_add_gang(pabd, cabd, free_on_free));
}
ASSERT(!abd_is_gang(cabd));
/*
* In order to verify that an ABD is not already part of
* another gang ABD, we must lock the child ABD's abd_mtx
* to check its abd_gang_link status. We unlock the abd_mtx
* only after it has been added to a gang ABD, which
* will update the abd_gang_link's status. See the comment below
* for how an ABD can be in multiple gang ABDs simultaneously.
*/
mutex_enter(&cabd->abd_mtx);
if (list_link_active(&cabd->abd_gang_link)) {
/*
* If the child ABD is already part of another
* gang ABD then we must allocate a new
* ABD to use a separate link. We mark the newly
* allocated ABD with ABD_FLAG_GANG_FREE, before
* adding it to the gang ABD's list, to make the
* gang ABD aware that it is responsible for calling
* abd_free(). We use abd_get_offset() in order
* to just allocate a new ABD but avoid copying the
* data over into the newly allocated ABD.
*
* An ABD may become part of multiple gang ABDs. For
* example, when writing ditto blocks, the same ABD
* is used to write 2 or 3 locations with 2 or 3
* zio_t's. Each of the zios may be aggregated with
* different adjacent zios. zio aggregation uses gang
* zios, so the single ABD can become part of multiple
* gang zios.
*
* The ASSERT below is to make sure that if
* free_on_free is passed as B_TRUE, the ABD cannot
* be in multiple gang ABDs. The gang ABD
* cannot be responsible for cleaning up the child
* ABD memory allocation if the ABD can be in
* multiple gang ABDs at one time.
*/
ASSERT3B(free_on_free, ==, B_FALSE);
child_abd = abd_get_offset(cabd, 0);
child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
} else {
child_abd = cabd;
if (free_on_free)
child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
}
ASSERT3P(child_abd, !=, NULL);
list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child_abd);
mutex_exit(&cabd->abd_mtx);
pabd->abd_size += child_abd->abd_size;
}
/*
* Locate the ABD for the supplied offset in the gang ABD.
* Return a new offset relative to the returned ABD.
*/
abd_t *
abd_gang_get_offset(abd_t *abd, size_t *off)
{
abd_t *cabd;
ASSERT(abd_is_gang(abd));
ASSERT3U(*off, <, abd->abd_size);
for (cabd = list_head(&ABD_GANG(abd).abd_gang_chain); cabd != NULL;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
if (*off >= cabd->abd_size)
*off -= cabd->abd_size;
else
return (cabd);
}
VERIFY3P(cabd, !=, NULL);
return (cabd);
}
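/*
 * Worked example (illustrative only): for a gang ABD whose chain holds a
 * 4 KiB child followed by an 8 KiB child, calling abd_gang_get_offset()
 * with *off == 6144 skips the first child (6144 >= 4096, so *off becomes
 * 2048) and returns the second child with *off == 2048.
 */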
/*
* Allocate a new ABD, using the provided struct (if non-NULL, and if
* circumstances allow - otherwise allocate the struct). The returned ABD will
* point to offset off of sabd. It shares the underlying buffer data with sabd.
* Use abd_free() to free. sabd must not be freed while any derived ABDs exist.
*/
static abd_t *
abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
abd_verify(sabd);
ASSERT3U(off + size, <=, sabd->abd_size);
if (abd_is_linear(sabd)) {
if (abd == NULL)
abd = abd_alloc_struct(0);
/*
* Even if this buf is filesystem metadata, we only track that
* if we own the underlying data buffer, which is not true in
* this case. Therefore, we don't ever use ABD_FLAG_META here.
*/
abd->abd_flags |= ABD_FLAG_LINEAR;
ABD_LINEAR_BUF(abd) = (char *)ABD_LINEAR_BUF(sabd) + off;
} else if (abd_is_gang(sabd)) {
size_t left = size;
if (abd == NULL) {
abd = abd_alloc_gang();
} else {
abd->abd_flags |= ABD_FLAG_GANG;
list_create(&ABD_GANG(abd).abd_gang_chain,
sizeof (abd_t), offsetof(abd_t, abd_gang_link));
}
abd->abd_flags &= ~ABD_FLAG_OWNER;
for (abd_t *cabd = abd_gang_get_offset(sabd, &off);
cabd != NULL && left > 0;
cabd = list_next(&ABD_GANG(sabd).abd_gang_chain, cabd)) {
int csize = MIN(left, cabd->abd_size - off);
abd_t *nabd = abd_get_offset_size(cabd, off, csize);
abd_gang_add(abd, nabd, B_TRUE);
left -= csize;
off = 0;
}
ASSERT3U(left, ==, 0);
} else {
abd = abd_get_offset_scatter(abd, sabd, off, size);
}
ASSERT3P(abd, !=, NULL);
abd->abd_size = size;
#ifdef ZFS_DEBUG
abd->abd_parent = sabd;
(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
#endif
return (abd);
}
/*
* Like abd_get_offset_size(), but memory for the abd_t is provided by the
* caller. Using this routine can improve performance by avoiding the cost
* of allocating memory for the abd_t struct, and updating the abd stats.
* Usually, the provided abd is returned, but in some circumstances (FreeBSD,
* if sabd is scatter and size is more than 2 pages) a new abd_t may need to
* be allocated. Therefore callers should be careful to use the returned
* abd_t*.
*/
abd_t *
abd_get_offset_struct(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
abd_t *result;
abd_init_struct(abd);
result = abd_get_offset_impl(abd, sabd, off, size);
if (result != abd)
abd_fini_struct(abd);
return (result);
}
abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;
VERIFY3U(size, >, 0);
return (abd_get_offset_impl(NULL, sabd, off, size));
}
abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
ASSERT3U(off + size, <=, sabd->abd_size);
return (abd_get_offset_impl(NULL, sabd, off, size));
}
/*
* Return a size scatter ABD containing only zeros.
*/
abd_t *
abd_get_zeros(size_t size)
{
ASSERT3P(abd_zero_scatter, !=, NULL);
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
return (abd_get_offset_size(abd_zero_scatter, 0, size));
}
/*
* Allocate a linear ABD structure for buf.
*/
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
abd_t *abd = abd_alloc_struct(0);
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
/*
* Even if this buf is filesystem metadata, we only track that if we
* own the underlying data buffer, which is not true in this case.
* Therefore, we don't ever use ABD_FLAG_META here.
*/
abd->abd_flags |= ABD_FLAG_LINEAR;
abd->abd_size = size;
ABD_LINEAR_BUF(abd) = buf;
return (abd);
}
/*
* Get the raw buffer associated with a linear ABD.
*/
void *
abd_to_buf(abd_t *abd)
{
ASSERT(abd_is_linear(abd));
abd_verify(abd);
return (ABD_LINEAR_BUF(abd));
}
/*
* Borrow a raw buffer from an ABD without copying the contents of the ABD
* into the buffer. If the ABD is scattered, this will allocate a raw buffer
* whose contents are undefined. To copy over the existing data in the ABD, use
* abd_borrow_buf_copy() instead.
*/
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
void *buf;
abd_verify(abd);
ASSERT3U(abd->abd_size, >=, n);
if (abd_is_linear(abd)) {
buf = abd_to_buf(abd);
} else {
buf = zio_buf_alloc(n);
}
#ifdef ZFS_DEBUG
(void) zfs_refcount_add_many(&abd->abd_children, n, buf);
#endif
return (buf);
}
void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
void *buf = abd_borrow_buf(abd, n);
if (!abd_is_linear(abd)) {
abd_copy_to_buf(buf, abd, n);
}
return (buf);
}
/*
* Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
* not change the contents of the ABD and will ASSERT that you didn't modify
* the buffer since it was borrowed. If you want any changes you made to buf to
* be copied back to abd, use abd_return_buf_copy() instead.
*/
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
abd_verify(abd);
ASSERT3U(abd->abd_size, >=, n);
if (abd_is_linear(abd)) {
ASSERT3P(buf, ==, abd_to_buf(abd));
} else {
ASSERT0(abd_cmp_buf(abd, buf, n));
zio_buf_free(buf, n);
}
#ifdef ZFS_DEBUG
(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
#endif
}
void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
if (!abd_is_linear(abd)) {
abd_copy_from_buf(abd, buf, n);
}
abd_return_buf(abd, buf, n);
}
void
abd_release_ownership_of_buf(abd_t *abd)
{
ASSERT(abd_is_linear(abd));
ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
/*
* abd_free() needs to handle LINEAR_PAGE ABD's specially.
* Since that flag does not survive the
* abd_release_ownership_of_buf() -> abd_get_from_buf() ->
* abd_take_ownership_of_buf() sequence, we don't allow releasing
* these "linear but not zio_[data_]buf_alloc()'ed" ABD's.
*/
ASSERT(!abd_is_linear_page(abd));
abd_verify(abd);
abd->abd_flags &= ~ABD_FLAG_OWNER;
/* Disable this flag since we no longer own the data buffer */
abd->abd_flags &= ~ABD_FLAG_META;
abd_update_linear_stats(abd, ABDSTAT_DECR);
}
/*
* Give this ABD ownership of the buffer that it's storing. Can only be used on
* linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
* with abd_alloc_linear() which subsequently released ownership of their buf
* with abd_release_ownership_of_buf().
*/
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
ASSERT(abd_is_linear(abd));
ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
abd_verify(abd);
abd->abd_flags |= ABD_FLAG_OWNER;
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd_update_linear_stats(abd, ABDSTAT_INCR);
}
/*
* Initializes an abd_iter based on whether the abd is a gang ABD
* or just a single ABD.
*/
static inline abd_t *
abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off)
{
abd_t *cabd = NULL;
if (abd_is_gang(abd)) {
cabd = abd_gang_get_offset(abd, &off);
if (cabd) {
abd_iter_init(aiter, cabd);
abd_iter_advance(aiter, off);
}
} else {
abd_iter_init(aiter, abd);
abd_iter_advance(aiter, off);
}
return (cabd);
}
/*
* Advances an abd_iter. We have to be careful with gang ABD as
* advancing could mean that we are at the end of a particular ABD and
* must grab the ABD in the gang ABD's list.
*/
static inline abd_t *
abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter,
size_t len)
{
abd_iter_advance(aiter, len);
if (abd_is_gang(abd) && abd_iter_at_end(aiter)) {
ASSERT3P(cabd, !=, NULL);
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd);
if (cabd) {
abd_iter_init(aiter, cabd);
abd_iter_advance(aiter, 0);
}
}
return (cabd);
}
int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
abd_iter_func_t *func, void *private)
{
struct abd_iter aiter;
int ret = 0;
if (size == 0)
return (0);
abd_verify(abd);
ASSERT3U(off + size, <=, abd->abd_size);
boolean_t gang = abd_is_gang(abd);
abd_t *c_abd = abd_init_abd_iter(abd, &aiter, off);
while (size > 0) {
/* If we are at the end of the gang ABD we are done */
if (gang && !c_abd)
break;
abd_iter_map(&aiter);
size_t len = MIN(aiter.iter_mapsize, size);
ASSERT3U(len, >, 0);
ret = func(aiter.iter_mapaddr, len, private);
abd_iter_unmap(&aiter);
if (ret != 0)
break;
size -= len;
c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
}
return (ret);
}
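/*
 * Illustrative sketch (not part of the original file): a custom iterator
 * callback that counts non-zero bytes, showing the abd_iter_func_t shape
 * expected by abd_iterate_func(). count_nonzero_cb is hypothetical;
 * returning non-zero from the callback stops the iteration early, as shown
 * in the loop above.
 *
 *   static int
 *   count_nonzero_cb(void *buf, size_t size, void *private)
 *   {
 *       uint64_t *count = private;
 *       const char *p = buf;
 *       for (size_t i = 0; i < size; i++)
 *           if (p[i] != 0)
 *               (*count)++;
 *       return (0);
 *   }
 *
 *   uint64_t count = 0;
 *   (void) abd_iterate_func(abd, 0, abd->abd_size,
 *       count_nonzero_cb, &count);
 */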
struct buf_arg {
void *arg_buf;
};
static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
struct buf_arg *ba_ptr = private;
(void) memcpy(ba_ptr->arg_buf, buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (0);
}
/*
* Copy abd to buf. (off is the offset in abd.)
*/
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
struct buf_arg ba_ptr = { buf };
(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
&ba_ptr);
}
static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
int ret;
struct buf_arg *ba_ptr = private;
ret = memcmp(buf, ba_ptr->arg_buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (ret);
}
/*
* Compare the contents of abd to buf. (off is the offset in abd.)
*/
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
struct buf_arg ba_ptr = { (void *) buf };
return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}
static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
struct buf_arg *ba_ptr = private;
(void) memcpy(buf, ba_ptr->arg_buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (0);
}
/*
* Copy from buf to abd. (off is the offset in abd.)
*/
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
struct buf_arg ba_ptr = { (void *) buf };
(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
&ba_ptr);
}
/*ARGSUSED*/
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
(void) memset(buf, 0, size);
return (0);
}
/*
* Zero out the abd from a particular offset to the end.
*/
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}
/*
* Iterate over two ABDs and call func incrementally on the two ABDs' data in
* equal-sized chunks (passed to func as raw buffers). func could be called many
* times during this iteration.
*/
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
size_t size, abd_iter_func2_t *func, void *private)
{
int ret = 0;
struct abd_iter daiter, saiter;
boolean_t dabd_is_gang_abd, sabd_is_gang_abd;
abd_t *c_dabd, *c_sabd;
if (size == 0)
return (0);
abd_verify(dabd);
abd_verify(sabd);
ASSERT3U(doff + size, <=, dabd->abd_size);
ASSERT3U(soff + size, <=, sabd->abd_size);
dabd_is_gang_abd = abd_is_gang(dabd);
sabd_is_gang_abd = abd_is_gang(sabd);
c_dabd = abd_init_abd_iter(dabd, &daiter, doff);
c_sabd = abd_init_abd_iter(sabd, &saiter, soff);
while (size > 0) {
/* if we are at the end of the gang ABD we are done */
if ((dabd_is_gang_abd && !c_dabd) ||
(sabd_is_gang_abd && !c_sabd))
break;
abd_iter_map(&daiter);
abd_iter_map(&saiter);
size_t dlen = MIN(daiter.iter_mapsize, size);
size_t slen = MIN(saiter.iter_mapsize, size);
size_t len = MIN(dlen, slen);
ASSERT(dlen > 0 || slen > 0);
ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
private);
abd_iter_unmap(&saiter);
abd_iter_unmap(&daiter);
if (ret != 0)
break;
size -= len;
c_dabd =
abd_advance_abd_iter(dabd, c_dabd, &daiter, len);
c_sabd =
abd_advance_abd_iter(sabd, c_sabd, &saiter, len);
}
return (ret);
}
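/*
* Editor's note: an illustrative sketch, not part of the original file.
* It shows a two-buffer callback for abd_iterate_func2() that XORs each
* source chunk into the corresponding destination chunk; the names
* xor_into_cb and abd_xor_off are hypothetical.
*/
/*ARGSUSED*/
static int
xor_into_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
unsigned char *d = dbuf;
const unsigned char *s = sbuf;
for (size_t i = 0; i < size; i++)
d[i] ^= s[i];
return (0);
}
static void
abd_xor_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
xor_into_cb, NULL);
}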
/*ARGSUSED*/
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
(void) memcpy(dbuf, sbuf, size);
return (0);
}
/*
* Copy from sabd to dabd starting from soff and doff.
*/
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
abd_copy_off_cb, NULL);
}
/*ARGSUSED*/
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
return (memcmp(bufa, bufb, size));
}
/*
* Compares the contents of two ABDs.
*/
int
abd_cmp(abd_t *dabd, abd_t *sabd)
{
ASSERT3U(dabd->abd_size, ==, sabd->abd_size);
return (abd_iterate_func2(dabd, sabd, 0, 0, dabd->abd_size,
abd_cmp_cb, NULL));
}
/*
* Iterate over code ABDs and a data ABD and call @func_raidz_gen.
*
* @cabds parity ABDs, must have equal size
* @dabd data ABD. Can be NULL (in this case @dsize = 0)
* @func_raidz_gen should be implemented so that its behaviour
* is the same when taking linear and when taking scatter
*/
void
abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
ssize_t csize, ssize_t dsize, const unsigned parity,
void (*func_raidz_gen)(void **, const void *, size_t, size_t))
{
int i;
ssize_t len, dlen;
struct abd_iter caiters[3];
struct abd_iter daiter = {0};
void *caddrs[3];
unsigned long flags __maybe_unused = 0;
abd_t *c_cabds[3];
abd_t *c_dabd = NULL;
boolean_t cabds_is_gang_abd[3];
boolean_t dabd_is_gang_abd = B_FALSE;
ASSERT3U(parity, <=, 3);
for (i = 0; i < parity; i++) {
cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], 0);
}
if (dabd) {
dabd_is_gang_abd = abd_is_gang(dabd);
c_dabd = abd_init_abd_iter(dabd, &daiter, 0);
}
ASSERT3S(dsize, >=, 0);
abd_enter_critical(flags);
while (csize > 0) {
/* if we are at the end of the gang ABD we are done */
if (dabd_is_gang_abd && !c_dabd)
break;
for (i = 0; i < parity; i++) {
/*
* If we are at the end of the gang ABD we are
* done.
*/
if (cabds_is_gang_abd[i] && !c_cabds[i])
break;
abd_iter_map(&caiters[i]);
caddrs[i] = caiters[i].iter_mapaddr;
}
len = csize;
if (dabd && dsize > 0)
abd_iter_map(&daiter);
switch (parity) {
case 3:
len = MIN(caiters[2].iter_mapsize, len);
/* falls through */
case 2:
len = MIN(caiters[1].iter_mapsize, len);
/* falls through */
case 1:
len = MIN(caiters[0].iter_mapsize, len);
}
/* must be progressive */
ASSERT3S(len, >, 0);
if (dabd && dsize > 0) {
/* this needs precise iter.length */
len = MIN(daiter.iter_mapsize, len);
dlen = len;
} else
dlen = 0;
/* must be progressive */
ASSERT3S(len, >, 0);
/*
* The iterated function likely will not do well if each
* segment except the last one is not a multiple of 512 (raidz).
*/
ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
func_raidz_gen(caddrs, daiter.iter_mapaddr, len, dlen);
for (i = parity-1; i >= 0; i--) {
abd_iter_unmap(&caiters[i]);
c_cabds[i] =
abd_advance_abd_iter(cabds[i], c_cabds[i],
&caiters[i], len);
}
if (dabd && dsize > 0) {
abd_iter_unmap(&daiter);
c_dabd =
abd_advance_abd_iter(dabd, c_dabd, &daiter,
dlen);
dsize -= dlen;
}
csize -= len;
ASSERT3S(dsize, >=, 0);
ASSERT3S(csize, >=, 0);
}
abd_exit_critical(flags);
}
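/*
* Editor's note: an illustrative single-parity sketch, not the real raidz
* math and not part of the original file. It has the shape expected by
* abd_raidz_gen_iterate() for parity == 1: "c" holds the mapped parity
* column addresses, "d" the mapped data chunk, "csize" the parity chunk
* length and "dsize" the data chunk length (dsize <= csize). The name
* example_gen_p and the parameter names are assumptions.
*/
static void
example_gen_p(void **c, const void *d, size_t csize, size_t dsize)
{
uint8_t *p = c[0];
const uint8_t *src = d;
(void) csize;
/* XOR the data chunk into the P parity column. */
for (size_t i = 0; i < dsize; i++)
p[i] ^= src[i];
}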
/*
* Iterate over code ABDs and data reconstruction target ABDs and call
* @func_raidz_rec. Function maps at most 6 pages atomically.
*
* @cabds parity ABDs, must have equal size
* @tabds rec target ABDs, at most 3
* @tsize size of data target columns
* @func_raidz_rec expects syndrome data in target columns. Function
* reconstructs data and overwrites target columns.
*/
void
abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
ssize_t tsize, const unsigned parity,
void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
const unsigned *mul),
const unsigned *mul)
{
int i;
ssize_t len;
struct abd_iter citers[3];
struct abd_iter xiters[3];
void *caddrs[3], *xaddrs[3];
unsigned long flags __maybe_unused = 0;
boolean_t cabds_is_gang_abd[3];
boolean_t tabds_is_gang_abd[3];
abd_t *c_cabds[3];
abd_t *c_tabds[3];
ASSERT3U(parity, <=, 3);
for (i = 0; i < parity; i++) {
cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
tabds_is_gang_abd[i] = abd_is_gang(tabds[i]);
c_cabds[i] =
abd_init_abd_iter(cabds[i], &citers[i], 0);
c_tabds[i] =
abd_init_abd_iter(tabds[i], &xiters[i], 0);
}
abd_enter_critical(flags);
while (tsize > 0) {
for (i = 0; i < parity; i++) {
/*
* If we are at the end of the gang ABD we
* are done.
*/
if (cabds_is_gang_abd[i] && !c_cabds[i])
break;
if (tabds_is_gang_abd[i] && !c_tabds[i])
break;
abd_iter_map(&citers[i]);
abd_iter_map(&xiters[i]);
caddrs[i] = citers[i].iter_mapaddr;
xaddrs[i] = xiters[i].iter_mapaddr;
}
len = tsize;
switch (parity) {
case 3:
len = MIN(xiters[2].iter_mapsize, len);
len = MIN(citers[2].iter_mapsize, len);
/* falls through */
case 2:
len = MIN(xiters[1].iter_mapsize, len);
len = MIN(citers[1].iter_mapsize, len);
/* falls through */
case 1:
len = MIN(xiters[0].iter_mapsize, len);
len = MIN(citers[0].iter_mapsize, len);
}
/* must be progressive */
ASSERT3S(len, >, 0);
/*
* The iterated function likely will not do well if each
* segment except the last one is not a multiple of 512 (raidz).
*/
ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
func_raidz_rec(xaddrs, len, caddrs, mul);
for (i = parity-1; i >= 0; i--) {
abd_iter_unmap(&xiters[i]);
abd_iter_unmap(&citers[i]);
c_tabds[i] =
abd_advance_abd_iter(tabds[i], c_tabds[i],
&xiters[i], len);
c_cabds[i] =
abd_advance_abd_iter(cabds[i], c_cabds[i],
&citers[i], len);
}
tsize -= len;
ASSERT3S(tsize, >=, 0);
}
abd_exit_critical(flags);
}
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index 02663e8e2e5d..bd64a4b24a2c 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -1,11058 +1,11084 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2011, 2020, Delphix. All rights reserved.
* Copyright (c) 2014, Saso Kiselkov. All rights reserved.
* Copyright (c) 2017, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2020, George Amanakis. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* DVA-based Adjustable Replacement Cache
*
* While much of the theory of operation used here is
* based on the self-tuning, low overhead replacement cache
* presented by Megiddo and Modha at FAST 2003, there are some
* significant differences:
*
* 1. The Megiddo and Modha model assumes any page is evictable.
* Pages in its cache cannot be "locked" into memory. This makes
* the eviction algorithm simple: evict the last page in the list.
* This also makes the performance characteristics easy to reason
* about. Our cache is not so simple. At any given moment, some
* subset of the blocks in the cache are un-evictable because we
* have handed out a reference to them. Blocks are only evictable
* when there are no external references active. This makes
* eviction far more problematic: we choose to evict the evictable
* blocks that are the "lowest" in the list.
*
* There are times when it is not possible to evict the requested
* space. In these circumstances we are unable to adjust the cache
* size. To prevent the cache growing unbounded at these times we
* implement a "cache throttle" that slows the flow of new data
* into the cache until we can make space available.
*
* 2. The Megiddo and Modha model assumes a fixed cache size.
* Pages are evicted when the cache is full and there is a cache
* miss. Our model has a variable sized cache. It grows with
* high use, but also tries to react to memory pressure from the
* operating system: decreasing its size when system memory is
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
* elements of the cache are therefore exactly the same size. So
* when adjusting the cache size following a cache miss, it's simply
* a matter of choosing a single page to evict. In our model, we
* have variable sized cache blocks (ranging from 512 bytes to
* 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
* See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
* by N. Megiddo & D. Modha, FAST 2003
*/
/*
* The locking model:
*
* A new reference to a cache buffer can be obtained in two
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal ARC algorithms for
* adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* ARC list locks.
*
* Buffers do not have their own mutexes, rather they rely on the
* hash table mutexes for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexes).
*
* buf_hash_find() returns the appropriate mutex (held) when it
* locates the requested buffer in the hash table. It returns
* NULL for the mutex if the buffer was not in the table.
*
* buf_hash_remove() expects the appropriate hash mutex to be
* already held before it is invoked.
*
* Each ARC state also has a mutex which is used to protect the
* buffer list associated with the state. When attempting to
* obtain a hash table lock while holding an ARC list lock you
* must use mutex_tryenter() to avoid deadlock. Also note that
* the active state mutex must be held before the ghost state mutex.
*
* It is also possible to register a callback which is run when the
* arc_meta_limit is reached and no buffers can be safely evicted. In
* this case the arc user should drop a reference on some arc buffers so
* they can be reclaimed and the arc_meta_limit honored. For example,
* when using the ZPL each dentry holds a reference on a znode. These
* dentries must be pruned before the arc buffer holding the znode can
* be safely evicted.
*
* Note that the majority of the performance stats are manipulated
* with atomic operations.
*
* The L2ARC uses the l2ad_mtx on each vdev for the following:
*
* - L2ARC buflist creation
* - L2ARC buflist eviction
* - L2ARC write completion, which walks L2ARC buflists
* - ARC header destruction, as it removes from L2ARC buflists
* - ARC header release, as it removes from L2ARC buflists
*/
/*
* ARC operation:
*
* Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
* This structure can point either to a block that is still in the cache or to
* one that is only accessible in an L2 ARC device, or it can provide
* information about a block that was recently evicted. If a block is
* only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
* information to retrieve it from the L2ARC device. This information is
* stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
* that is in this state cannot access the data directly.
*
* Blocks that are actively being referenced or have not been evicted
* are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
* the arc_buf_hdr_t that will point to the data block in memory. A block can
* only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
* caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
* also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
*
* The L1ARC's data pointer may or may not be uncompressed. The ARC has the
* ability to store the physical data (b_pabd) associated with the DVA of the
* arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
* it will match its on-disk compression characteristics. This behavior can be
* disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
* compressed ARC functionality is disabled, the b_pabd will point to an
* uncompressed version of the on-disk data.
*
* Data in the L1ARC is not accessed by consumers of the ARC directly. Each
* arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
* Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
* consumer. The ARC will provide references to this data and will keep it
* cached until it is no longer in use. The ARC caches only the L1ARC's physical
* data block and will evict any arc_buf_t that is no longer referenced. The
* amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
* "overhead_size" kstat.
*
* Depending on the consumer, an arc_buf_t can be requested in uncompressed or
* compressed form. The typical case is that consumers will want uncompressed
* data, and when that happens a new data buffer is allocated where the data is
* decompressed for them to use. Currently the only consumer who wants
* compressed arc_buf_t's is "zfs send", when it streams data exactly as it
* exists on disk. When this happens, the arc_buf_t's data buffer is shared
* with the arc_buf_hdr_t.
*
* Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
* first one is owned by a compressed send consumer (and therefore references
* the same compressed data buffer as the arc_buf_hdr_t) and the second could be
* used by any other consumer (and has its own uncompressed copy of the data
* buffer).
*
* arc_buf_hdr_t
* +-----------+
* | fields |
* | common to |
* | L1- and |
* | L2ARC |
* +-----------+
* | l2arc_buf_hdr_t
* | |
* +-----------+
* | l1arc_buf_hdr_t
* | | arc_buf_t
* | b_buf +------------>+-----------+ arc_buf_t
* | b_pabd +-+ |b_next +---->+-----------+
* +-----------+ | |-----------| |b_next +-->NULL
* | |b_comp = T | +-----------+
* | |b_data +-+ |b_comp = F |
* | +-----------+ | |b_data +-+
* +->+------+ | +-----------+ |
* compressed | | | |
* data | |<--------------+ | uncompressed
* +------+ compressed, | data
* shared +-->+------+
* data | |
* | |
* +------+
*
* When a consumer reads a block, the ARC must first look to see if the
* arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
* arc_buf_t and either copies uncompressed data into a new data buffer from an
* existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
* new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
* hdr is compressed and the desired compression characteristics of the
* arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
* arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
* the last buffer in the hdr's b_buf list; however, a shared compressed buf can
* be anywhere in the hdr's list.
*
* The diagram below shows an example of an uncompressed ARC hdr that is
* sharing its data with an arc_buf_t (note that the shared uncompressed buf is
* the last element in the buf list):
*
* arc_buf_hdr_t
* +-----------+
* | |
* | |
* | |
* +-----------+
* l2arc_buf_hdr_t| |
* | |
* +-----------+
* l1arc_buf_hdr_t| |
* | | arc_buf_t (shared)
* | b_buf +------------>+---------+ arc_buf_t
* | | |b_next +---->+---------+
* | b_pabd +-+ |---------| |b_next +-->NULL
* +-----------+ | | | +---------+
* | |b_data +-+ | |
* | +---------+ | |b_data +-+
* +->+------+ | +---------+ |
* | | | |
* uncompressed | | | |
* data +------+ | |
* ^ +->+------+ |
* | uncompressed | | |
* | data | | |
* | +------+ |
* +---------------------------------+
*
* Writing to the ARC requires that the ARC first discard the hdr's b_pabd
* since the physical block is about to be rewritten. The new data contents
* will be contained in the arc_buf_t. As the I/O pipeline performs the write,
* it may compress the data before writing it to disk. The ARC will be called
* with the transformed data and will bcopy the transformed on-disk block into
* a newly allocated b_pabd. Writes are always done into buffers which have
* either been loaned (and hence are new and don't have other readers) or
* buffers which have been released (and hence have their own hdr, if there
* were originally other readers of the buf's original hdr). This ensures that
* the ARC only needs to update a single buf and its hdr after a write occurs.
*
* When the L2ARC is in use, it will also take advantage of the b_pabd. The
* L2ARC will always write the contents of b_pabd to the L2ARC. This means
* that when compressed ARC is enabled that the L2ARC blocks are identical
* to the on-disk block in the main data pool. This provides a significant
* advantage since the ARC can leverage the bp's checksum when reading from the
* L2ARC to determine if the contents are valid. However, if the compressed
* ARC is disabled, then the L2ARC's block must be transformed to look
* like the physical block in the main data pool before comparing the
* checksum and determining its validity.
*
* The L1ARC has a slightly different system for storing encrypted data.
* Raw (encrypted + possibly compressed) data has a few subtle differences from
* data that is just compressed. The biggest difference is that it is not
* possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
* The other difference is that encryption cannot be treated as a suggestion.
* If a caller would prefer compressed data, but they actually wind up with
* uncompressed data the worst thing that could happen is there might be a
* performance hit. If the caller requests encrypted data, however, we must be
* sure they actually get it or else secret information could be leaked. Raw
* data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
* may have both an encrypted version and a decrypted version of its data at
* once. When a caller needs a raw arc_buf_t, it is allocated and the data is
* copied out of this header. To avoid complications with b_pabd, raw buffers
* cannot be shared.
*/
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
#include <sys/wmsum.h>
#include <cityhash.h>
#include <sys/vdev_trim.h>
#include <sys/zfs_racct.h>
#include <sys/zstd/zstd.h>
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
#endif
/*
* This thread's job is to keep enough free memory in the system, by
* calling arc_kmem_reap_soon() plus arc_reduce_target_size(), which improves
* arc_available_memory().
*/
static zthr_t *arc_reap_zthr;
/*
* This thread's job is to keep arc_size under arc_c, by calling
* arc_evict(), which improves arc_is_overflowing().
*/
static zthr_t *arc_evict_zthr;
static kmutex_t arc_evict_lock;
static boolean_t arc_evict_needed = B_FALSE;
/*
* Count of bytes evicted since boot.
*/
static uint64_t arc_evict_count;
/*
* List of arc_evict_waiter_t's, representing threads waiting for the
* arc_evict_count to reach specific values.
*/
static list_t arc_evict_waiters;
/*
* When arc_is_overflowing(), arc_get_data_impl() waits for this percent of
* the requested amount of data to be evicted. For example, by default for
* every 2KB that's evicted, 1KB of it may be "reused" by a new allocation.
* Since this is above 100%, it ensures that progress is made towards getting
* arc_size under arc_c. Since this is finite, it ensures that allocations
* can still happen, even during the potentially long time that arc_size is
* more than arc_c.
*/
int zfs_arc_eviction_pct = 200;
/*
* The number of headers to evict in arc_evict_state_impl() before
* dropping the sublist lock and evicting from another sublist. A lower
* value means we're more likely to evict the "correct" header (i.e. the
* oldest header in the arc state), but comes with higher overhead
* (i.e. more invocations of arc_evict_state_impl()).
*/
int zfs_arc_evict_batch_limit = 10;
/* number of seconds before growing cache again */
int arc_grow_retry = 5;
/*
* Minimum time between calls to arc_kmem_reap_soon().
*/
int arc_kmem_cache_reap_retry_ms = 1000;
/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
int zfs_arc_overflow_shift = 8;
/* shift of arc_c for calculating both min and max arc_p */
int arc_p_min_shift = 4;
/* log2(fraction of arc to reclaim) */
int arc_shrink_shift = 7;
/* percent of pagecache to reclaim arc to */
#ifdef _KERNEL
uint_t zfs_arc_pc_percent = 0;
#endif
/*
* log2(fraction of ARC which must be free to allow growing).
* I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
* when reading a new block into the ARC, we will evict an equal-sized block
* from the ARC.
*
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
* we will still not allow it to grow.
*/
int arc_no_grow_shift = 5;
/*
* minimum lifespan of a prefetch block in clock ticks
* (initialized in arc_init())
*/
static int arc_min_prefetch_ms;
static int arc_min_prescient_prefetch_ms;
/*
* If this percent of memory is free, don't throttle.
*/
int arc_lotsfree_percent = 10;
/*
* The arc has filled available memory and has now warmed up.
*/
boolean_t arc_warm;
/*
* These tunables are for performance analysis.
*/
unsigned long zfs_arc_max = 0;
unsigned long zfs_arc_min = 0;
unsigned long zfs_arc_meta_limit = 0;
unsigned long zfs_arc_meta_min = 0;
unsigned long zfs_arc_dnode_limit = 0;
unsigned long zfs_arc_dnode_reduce_percent = 10;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
/*
* ARC dirty data constraints for arc_tempreserve_space() throttle.
*/
unsigned long zfs_arc_dirty_limit_percent = 50; /* total dirty data limit */
unsigned long zfs_arc_anon_limit_percent = 25; /* anon block dirty limit */
unsigned long zfs_arc_pool_dirty_percent = 20; /* each pool's anon allowance */
/*
* Enable or disable compressed arc buffers.
*/
int zfs_compressed_arc_enabled = B_TRUE;
/*
* ARC will evict meta buffers that exceed arc_meta_limit. This
* tunable makes arc_meta_limit adjustable for different workloads.
*/
unsigned long zfs_arc_meta_limit_percent = 75;
/*
* Percentage that can be consumed by dnodes of ARC meta buffers.
*/
unsigned long zfs_arc_dnode_limit_percent = 10;
/*
* These tunables are Linux specific
*/
unsigned long zfs_arc_sys_free = 0;
int zfs_arc_min_prefetch_ms = 0;
int zfs_arc_min_prescient_prefetch_ms = 0;
int zfs_arc_p_dampener_disable = 1;
int zfs_arc_meta_prune = 10000;
int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
int zfs_arc_meta_adjust_restarts = 4096;
int zfs_arc_lotsfree_percent = 10;
/* The 6 states: */
arc_state_t ARC_anon;
arc_state_t ARC_mru;
arc_state_t ARC_mru_ghost;
arc_state_t ARC_mfu;
arc_state_t ARC_mfu_ghost;
arc_state_t ARC_l2c_only;
arc_stats_t arc_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "demand_data_hits", KSTAT_DATA_UINT64 },
{ "demand_data_misses", KSTAT_DATA_UINT64 },
{ "demand_metadata_hits", KSTAT_DATA_UINT64 },
{ "demand_metadata_misses", KSTAT_DATA_UINT64 },
{ "prefetch_data_hits", KSTAT_DATA_UINT64 },
{ "prefetch_data_misses", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
{ "mru_hits", KSTAT_DATA_UINT64 },
{ "mru_ghost_hits", KSTAT_DATA_UINT64 },
{ "mfu_hits", KSTAT_DATA_UINT64 },
{ "mfu_ghost_hits", KSTAT_DATA_UINT64 },
{ "deleted", KSTAT_DATA_UINT64 },
{ "mutex_miss", KSTAT_DATA_UINT64 },
{ "access_skip", KSTAT_DATA_UINT64 },
{ "evict_skip", KSTAT_DATA_UINT64 },
{ "evict_not_enough", KSTAT_DATA_UINT64 },
{ "evict_l2_cached", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mfu", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mru", KSTAT_DATA_UINT64 },
{ "evict_l2_ineligible", KSTAT_DATA_UINT64 },
{ "evict_l2_skip", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "p", KSTAT_DATA_UINT64 },
{ "c", KSTAT_DATA_UINT64 },
{ "c_min", KSTAT_DATA_UINT64 },
{ "c_max", KSTAT_DATA_UINT64 },
{ "size", KSTAT_DATA_UINT64 },
{ "compressed_size", KSTAT_DATA_UINT64 },
{ "uncompressed_size", KSTAT_DATA_UINT64 },
{ "overhead_size", KSTAT_DATA_UINT64 },
{ "hdr_size", KSTAT_DATA_UINT64 },
{ "data_size", KSTAT_DATA_UINT64 },
{ "metadata_size", KSTAT_DATA_UINT64 },
{ "dbuf_size", KSTAT_DATA_UINT64 },
{ "dnode_size", KSTAT_DATA_UINT64 },
{ "bonus_size", KSTAT_DATA_UINT64 },
#if defined(COMPAT_FREEBSD11)
{ "other_size", KSTAT_DATA_UINT64 },
#endif
{ "anon_size", KSTAT_DATA_UINT64 },
{ "anon_evictable_data", KSTAT_DATA_UINT64 },
{ "anon_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_size", KSTAT_DATA_UINT64 },
{ "mru_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_size", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_size", KSTAT_DATA_UINT64 },
{ "mfu_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_size", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "l2_hits", KSTAT_DATA_UINT64 },
{ "l2_misses", KSTAT_DATA_UINT64 },
{ "l2_prefetch_asize", KSTAT_DATA_UINT64 },
{ "l2_mru_asize", KSTAT_DATA_UINT64 },
{ "l2_mfu_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_data_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_metadata_asize", KSTAT_DATA_UINT64 },
{ "l2_feeds", KSTAT_DATA_UINT64 },
{ "l2_rw_clash", KSTAT_DATA_UINT64 },
{ "l2_read_bytes", KSTAT_DATA_UINT64 },
{ "l2_write_bytes", KSTAT_DATA_UINT64 },
{ "l2_writes_sent", KSTAT_DATA_UINT64 },
{ "l2_writes_done", KSTAT_DATA_UINT64 },
{ "l2_writes_error", KSTAT_DATA_UINT64 },
{ "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_reading", KSTAT_DATA_UINT64 },
{ "l2_evict_l1cached", KSTAT_DATA_UINT64 },
{ "l2_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_abort_lowmem", KSTAT_DATA_UINT64 },
{ "l2_cksum_bad", KSTAT_DATA_UINT64 },
{ "l2_io_error", KSTAT_DATA_UINT64 },
{ "l2_size", KSTAT_DATA_UINT64 },
{ "l2_asize", KSTAT_DATA_UINT64 },
{ "l2_hdr_size", KSTAT_DATA_UINT64 },
{ "l2_log_blk_writes", KSTAT_DATA_UINT64 },
{ "l2_log_blk_avg_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_count", KSTAT_DATA_UINT64 },
{ "l2_data_to_meta_ratio", KSTAT_DATA_UINT64 },
{ "l2_rebuild_success", KSTAT_DATA_UINT64 },
{ "l2_rebuild_unsupported", KSTAT_DATA_UINT64 },
{ "l2_rebuild_io_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_dh_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_cksum_lb_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_lowmem", KSTAT_DATA_UINT64 },
{ "l2_rebuild_size", KSTAT_DATA_UINT64 },
{ "l2_rebuild_asize", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs_precached", KSTAT_DATA_UINT64 },
{ "l2_rebuild_log_blks", KSTAT_DATA_UINT64 },
{ "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "memory_direct_count", KSTAT_DATA_UINT64 },
{ "memory_indirect_count", KSTAT_DATA_UINT64 },
{ "memory_all_bytes", KSTAT_DATA_UINT64 },
{ "memory_free_bytes", KSTAT_DATA_UINT64 },
{ "memory_available_bytes", KSTAT_DATA_INT64 },
{ "arc_no_grow", KSTAT_DATA_UINT64 },
{ "arc_tempreserve", KSTAT_DATA_UINT64 },
{ "arc_loaned_bytes", KSTAT_DATA_UINT64 },
{ "arc_prune", KSTAT_DATA_UINT64 },
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_meta_limit", KSTAT_DATA_UINT64 },
{ "arc_dnode_limit", KSTAT_DATA_UINT64 },
{ "arc_meta_max", KSTAT_DATA_UINT64 },
{ "arc_meta_min", KSTAT_DATA_UINT64 },
{ "async_upgrade_sync", KSTAT_DATA_UINT64 },
{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "arc_need_free", KSTAT_DATA_UINT64 },
{ "arc_sys_free", KSTAT_DATA_UINT64 },
{ "arc_raw_size", KSTAT_DATA_UINT64 },
{ "cached_only_in_progress", KSTAT_DATA_UINT64 },
{ "abd_chunk_waste_size", KSTAT_DATA_UINT64 },
};
arc_sums_t arc_sums;
#define ARCSTAT_MAX(stat, val) { \
uint64_t m; \
while ((val) > (m = arc_stats.stat.value.ui64) && \
(m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
continue; \
}
/*
* We define a macro to allow ARC hits/misses to be easily broken down by
* two separate conditions, giving a total of four different subtypes for
* each of hits and misses (so eight statistics total).
*/
#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
if (cond1) { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
} \
} else { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
} \
}
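/*
* Editor's note: an illustrative sketch, not part of the original file,
* showing how ARCSTAT_CONDSTAT picks one of four counters. The helper name
* example_bump_hit_stat is hypothetical, and it assumes ARCSTAT_BUMP and the
* arcstat_{demand,prefetch}_{data,metadata}_hits fields come from
* sys/arc_impl.h (they correspond to the kstat names listed above).
*/
static inline void
example_bump_hit_stat(boolean_t is_demand, boolean_t is_metadata)
{
/* Expands to a single ARCSTAT_BUMP() of one of the four hit counters. */
ARCSTAT_CONDSTAT(is_demand, demand, prefetch,
!is_metadata, data, metadata, hits);
}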
/*
* This macro allows us to use kstats as floating averages. Each time we
* update this kstat, we first factor it and the update value by
* ARCSTAT_AVG_FACTOR to shrink the new value's contribution to the overall
* average. This macro assumes that integer loads and stores are atomic, but
* is not safe for multiple writers updating the kstat in parallel (only the
* last writer's update will remain).
*/
#define ARCSTAT_F_AVG_FACTOR 3
#define ARCSTAT_F_AVG(stat, value) \
do { \
uint64_t x = ARCSTAT(stat); \
x = x - x / ARCSTAT_F_AVG_FACTOR + \
(value) / ARCSTAT_F_AVG_FACTOR; \
ARCSTAT(stat) = x; \
- _NOTE(CONSTCOND) \
} while (0)
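/*
* Editor's note: a worked example of the recurrence above, not part of the
* original file. With ARCSTAT_F_AVG_FACTOR == 3, feeding a constant value
* of 900 into a kstat that starts at 0 gives, with integer division:
*   0   -> 0   - 0/3   + 900/3 = 300
*   300 -> 300 - 100   + 300   = 500
*   500 -> 500 - 166   + 300   = 634
* so roughly one third of the gap to the input is closed per update, which
* is why the kstat behaves as a floating (exponential) average.
*/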
kstat_t *arc_ksp;
/*
* There are several ARC variables that are critical to export as kstats --
* but we don't want to have to grovel around in the kstat whenever we wish to
* manipulate them. For these variables, we therefore define them to be in
* terms of the statistic variable. This assures that we are not introducing
* the possibility of inconsistency by having shadow copies of the variables,
* while still allowing the code to be readable.
*/
#define arc_tempreserve ARCSTAT(arcstat_tempreserve)
#define arc_loaned_bytes ARCSTAT(arcstat_loaned_bytes)
#define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
/* max size for dnodes */
#define arc_dnode_size_limit ARCSTAT(arcstat_dnode_limit)
#define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */
#define arc_need_free ARCSTAT(arcstat_need_free) /* waiting to be evicted */
hrtime_t arc_growtime;
list_t arc_prune_list;
kmutex_t arc_prune_mtx;
taskq_t *arc_prune_taskq;
#define GHOST_STATE(state) \
((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
(state) == arc_l2c_only)
#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define HDR_PRESCIENT_PREFETCH(hdr) \
((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define HDR_COMPRESSION_ENABLED(hdr) \
((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define HDR_L2_READING(hdr) \
(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define HDR_PROTECTED(hdr) ((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define HDR_NOAUTH(hdr) ((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
#define HDR_ISTYPE_METADATA(hdr) \
((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
#define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define HDR_HAS_RABD(hdr) \
(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) && \
(hdr)->b_crypt_hdr.b_rabd != NULL)
#define HDR_ENCRYPTED(hdr) \
(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define HDR_AUTHENTICATED(hdr) \
(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
#define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
#define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
#define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define ARC_BUF_ENCRYPTED(buf) ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
/*
* Other sizes
*/
#define HDR_FULL_CRYPT_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define HDR_FULL_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr))
#define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
/*
* Hash table routines
*/
#define BUF_LOCKS 2048
typedef struct buf_hash_table {
uint64_t ht_mask;
arc_buf_hdr_t **ht_table;
kmutex_t ht_locks[BUF_LOCKS] ____cacheline_aligned;
} buf_hash_table_t;
static buf_hash_table_t buf_hash_table;
#define BUF_HASH_INDEX(spa, dva, birth) \
(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define BUF_HASH_LOCK(idx) (&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define HDR_LOCK(hdr) \
(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
uint64_t zfs_crc64_table[256];
/*
* Level 2 ARC
*/
#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
#define L2ARC_HEADROOM 2 /* num of writes */
/*
* If we discover during ARC scan any buffers to be compressed, we boost
* our headroom for the next scanning cycle by this percentage multiple.
*/
#define L2ARC_HEADROOM_BOOST 200
#define L2ARC_FEED_SECS 1 /* caching interval secs */
#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
/*
* We can feed L2ARC from two states of ARC buffers, mru and mfu,
* and each of these states has two types: data and metadata.
*/
#define L2ARC_FEED_TYPES 4
/* L2ARC Performance Tunables */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
unsigned long l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
unsigned long l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
unsigned long l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE; /* turbo warmup */
int l2arc_norw = B_FALSE; /* no reads during writes */
int l2arc_meta_percent = 33; /* limit on headers size */
/*
* L2ARC Internals
*/
static list_t L2ARC_dev_list; /* device list */
static list_t *l2arc_dev_list; /* device list pointer */
static kmutex_t l2arc_dev_mtx; /* device list mutex */
static l2arc_dev_t *l2arc_dev_last; /* last device used */
static list_t L2ARC_free_on_write; /* free after write buf list */
static list_t *l2arc_free_on_write; /* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
static uint64_t l2arc_ndev; /* number of devices */
typedef struct l2arc_read_callback {
arc_buf_hdr_t *l2rcb_hdr; /* read header */
blkptr_t l2rcb_bp; /* original blkptr */
zbookmark_phys_t l2rcb_zb; /* original bookmark */
int l2rcb_flags; /* original flags */
abd_t *l2rcb_abd; /* temporary buffer */
} l2arc_read_callback_t;
typedef struct l2arc_data_free {
/* protected by l2arc_free_on_write_mtx */
abd_t *l2df_abd;
size_t l2df_size;
arc_buf_contents_t l2df_type;
list_node_t l2df_list_node;
} l2arc_data_free_t;
typedef enum arc_fill_flags {
ARC_FILL_LOCKED = 1 << 0, /* hdr lock is held */
ARC_FILL_COMPRESSED = 1 << 1, /* fill with compressed data */
ARC_FILL_ENCRYPTED = 1 << 2, /* fill with encrypted data */
ARC_FILL_NOAUTH = 1 << 3, /* don't attempt to authenticate */
ARC_FILL_IN_PLACE = 1 << 4 /* fill in place (special case) */
} arc_fill_flags_t;
typedef enum arc_ovf_level {
ARC_OVF_NONE, /* ARC within target size. */
ARC_OVF_SOME, /* ARC is slightly overflowed. */
ARC_OVF_SEVERE /* ARC is severely overflowed. */
} arc_ovf_level_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static kmutex_t l2arc_rebuild_thr_lock;
static kcondvar_t l2arc_rebuild_thr_cv;
enum arc_hdr_alloc_flags {
ARC_HDR_ALLOC_RDATA = 0x1,
ARC_HDR_DO_ADAPT = 0x2,
};
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *, boolean_t);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *, boolean_t);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static void arc_buf_watch(arc_buf_t *);
static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);
static void l2arc_do_free_on_write(void);
static void l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only);
#define l2arc_hdr_arcstats_increment(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_FALSE)
#define l2arc_hdr_arcstats_decrement(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_FALSE)
#define l2arc_hdr_arcstats_increment_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_TRUE)
#define l2arc_hdr_arcstats_decrement_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_TRUE)
/*
* l2arc_mfuonly : A ZFS module parameter that controls whether only MFU
* metadata and data are cached from ARC into L2ARC.
*/
int l2arc_mfuonly = 0;
/*
* L2ARC TRIM
* l2arc_trim_ahead : A ZFS module parameter that controls how much ahead of
* the current write size (l2arc_write_max) we should TRIM if we
* have filled the device. It is defined as a percentage of the
* write size. If set to 100 we trim twice the space required to
* accommodate upcoming writes. A minimum of 64MB will be trimmed.
* It also enables TRIM of the whole L2ARC device upon creation or
* addition to an existing pool or if the header of the device is
* invalid upon importing a pool or onlining a cache device. The
* default is 0, which disables TRIM on L2ARC altogether as it can
* put significant stress on the underlying storage devices. This
* will vary depending on how well the specific device handles
* these commands.
*/
unsigned long l2arc_trim_ahead = 0;
/*
* Performance tuning of L2ARC persistence:
*
* l2arc_rebuild_enabled : A ZFS module parameter that controls whether adding
* an L2ARC device (either at pool import or later) will attempt
* to rebuild L2ARC buffer contents.
* l2arc_rebuild_blocks_min_l2size : A ZFS module parameter that controls
* whether log blocks are written to the L2ARC device. If the L2ARC
* device is less than 1GB, the amount of data l2arc_evict()
* evicts is significant compared to the amount of restored L2ARC
* data. In this case do not write log blocks in L2ARC in order
* not to waste space.
*/
int l2arc_rebuild_enabled = B_TRUE;
unsigned long l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024;
/* L2ARC persistence rebuild control routines. */
void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen);
static void l2arc_dev_rebuild_thread(void *arg);
static int l2arc_rebuild(l2arc_dev_t *dev);
/* L2ARC persistence read I/O routines. */
static int l2arc_dev_hdr_read(l2arc_dev_t *dev);
static int l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lp, const l2arc_log_blkptr_t *next_lp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io);
static zio_t *l2arc_log_blk_fetch(vdev_t *vd,
const l2arc_log_blkptr_t *lp, l2arc_log_blk_phys_t *lb);
static void l2arc_log_blk_fetch_abort(zio_t *zio);
/* L2ARC persistence block restoration routines. */
static void l2arc_log_blk_restore(l2arc_dev_t *dev,
const l2arc_log_blk_phys_t *lb, uint64_t lb_asize);
static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le,
l2arc_dev_t *dev);
/* L2ARC persistence write I/O routines. */
static void l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
l2arc_write_callback_t *cb);
/* L2ARC persistence auxiliary routines. */
boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *lbp);
static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev,
const arc_buf_hdr_t *ab);
boolean_t l2arc_range_check_overlap(uint64_t bottom,
uint64_t top, uint64_t check);
static void l2arc_blk_fetch_done(zio_t *zio);
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev);
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
}
#define HDR_EMPTY(hdr) \
((hdr)->b_dva.dva_word[0] == 0 && \
(hdr)->b_dva.dva_word[1] == 0)
#define HDR_EMPTY_OR_LOCKED(hdr) \
(HDR_EMPTY(hdr) || MUTEX_HELD(HDR_LOCK(hdr)))
#define HDR_EQUAL(spa, dva, birth, hdr) \
((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
hdr->b_dva.dva_word[0] = 0;
hdr->b_dva.dva_word[1] = 0;
hdr->b_birth = 0;
}
static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t birth = BP_PHYSICAL_BIRTH(bp);
uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *hdr;
mutex_enter(hash_lock);
for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
hdr = hdr->b_hash_next) {
if (HDR_EQUAL(spa, dva, birth, hdr)) {
*lockp = hash_lock;
return (hdr);
}
}
mutex_exit(hash_lock);
*lockp = NULL;
return (NULL);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
* If lockp == NULL, the caller is assumed to already hold the hash lock.
*/
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *fhdr;
uint32_t i;
ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
ASSERT(hdr->b_birth != 0);
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (lockp != NULL) {
*lockp = hash_lock;
mutex_enter(hash_lock);
} else {
ASSERT(MUTEX_HELD(hash_lock));
}
for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
fhdr = fhdr->b_hash_next, i++) {
if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
return (fhdr);
}
hdr->b_hash_next = buf_hash_table.ht_table[idx];
buf_hash_table.ht_table[idx] = hdr;
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
if (i > 0) {
ARCSTAT_BUMP(arcstat_hash_collisions);
if (i == 1)
ARCSTAT_BUMP(arcstat_hash_chains);
ARCSTAT_MAX(arcstat_hash_chain_max, i);
}
uint64_t he = atomic_inc_64_nv(
&arc_stats.arcstat_hash_elements.value.ui64);
ARCSTAT_MAX(arcstat_hash_elements_max, he);
return (NULL);
}
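/*
* Editor's note: an illustrative sketch of the insert-or-detect-collision
* pattern described above; it is not part of the original file and the name
* example_try_insert is hypothetical. Note that when lockp is non-NULL,
* buf_hash_insert() returns with the hash lock held in either case, so the
* caller must drop it.
*/
static boolean_t
example_try_insert(arc_buf_hdr_t *hdr)
{
kmutex_t *hash_lock;
arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
if (exists != NULL) {
/* An equal header is already cached; keep using that one. */
mutex_exit(hash_lock);
return (B_FALSE);
}
/* hdr is now discoverable through the hash table. */
mutex_exit(hash_lock);
return (B_TRUE);
}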
static void
buf_hash_remove(arc_buf_hdr_t *hdr)
{
arc_buf_hdr_t *fhdr, **hdrp;
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
ASSERT(HDR_IN_HASH_TABLE(hdr));
hdrp = &buf_hash_table.ht_table[idx];
while ((fhdr = *hdrp) != hdr) {
ASSERT3P(fhdr, !=, NULL);
hdrp = &fhdr->b_hash_next;
}
*hdrp = hdr->b_hash_next;
hdr->b_hash_next = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
atomic_dec_64(&arc_stats.arcstat_hash_elements.value.ui64);
if (buf_hash_table.ht_table[idx] &&
buf_hash_table.ht_table[idx]->b_hash_next == NULL)
ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
* Global data structures and functions for the buf kmem cache.
*/
static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_full_crypt_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;
static void
buf_fini(void)
{
int i;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel
*/
vmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
kmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
for (i = 0; i < BUF_LOCKS; i++)
mutex_destroy(BUF_HASH_LOCK(i));
kmem_cache_destroy(hdr_full_cache);
kmem_cache_destroy(hdr_full_crypt_cache);
kmem_cache_destroy(hdr_l2only_cache);
kmem_cache_destroy(buf_cache);
}
/*
* Constructor callback - called when the cache is empty
* and a new buf is requested.
*/
/* ARGSUSED */
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_FULL_SIZE);
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&hdr->b_l1hdr.b_arc_node);
list_link_init(&hdr->b_l2hdr.b_l2node);
multilist_link_init(&hdr->b_l1hdr.b_arc_node);
arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
return (0);
}
/* ARGSUSED */
static int
hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_hdr_t *hdr = vbuf;
hdr_full_cons(vbuf, unused, kmflag);
bzero(&hdr->b_crypt_hdr, sizeof (hdr->b_crypt_hdr));
arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
return (0);
}
/* ARGSUSED */
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_L2ONLY_SIZE);
arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
return (0);
}
/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_t *buf = vbuf;
bzero(buf, sizeof (arc_buf_t));
mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
return (0);
}
/*
* Destructor callback - called when a cached buf is
* no longer required.
*/
/* ARGSUSED */
static void
hdr_full_dest(void *vbuf, void *unused)
{
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
cv_destroy(&hdr->b_l1hdr.b_cv);
zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}
/* ARGSUSED */
static void
hdr_full_crypt_dest(void *vbuf, void *unused)
{
arc_buf_hdr_t *hdr = vbuf;
hdr_full_dest(vbuf, unused);
arc_space_return(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
}
/* ARGSUSED */
static void
hdr_l2only_dest(void *vbuf, void *unused)
{
arc_buf_hdr_t *hdr __maybe_unused = vbuf;
ASSERT(HDR_EMPTY(hdr));
arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
}
/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
arc_buf_t *buf = vbuf;
mutex_destroy(&buf->b_evict_lock);
arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
static void
buf_init(void)
{
uint64_t *ct = NULL;
uint64_t hsize = 1ULL << 12;
int i, j;
/*
* The hash table is big enough to fill all of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
while (hsize * zfs_arc_average_blocksize < arc_all_memory())
hsize <<= 1;
retry:
buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
buf_hash_table.ht_table =
vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
buf_hash_table.ht_table =
kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
if (buf_hash_table.ht_table == NULL) {
ASSERT(hsize > (1ULL << 8));
hsize >>= 1;
goto retry;
}
hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, 0);
hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt",
HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest,
NULL, NULL, NULL, 0);
hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, NULL,
NULL, NULL, 0);
buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
for (i = 0; i < 256; i++)
for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
for (i = 0; i < BUF_LOCKS; i++)
mutex_init(BUF_HASH_LOCK(i), NULL, MUTEX_DEFAULT, NULL);
}
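/*
* Editor's note: a worked example of the sizing loop in buf_init() above,
* not part of the original file. With 64 GiB of physical memory and the
* default zfs_arc_average_blocksize of 8 KiB, hsize doubles from 2^12 until
* hsize * 8 KiB >= 64 GiB, i.e. hsize == 2^23 buckets. On a 64-bit kernel
* that is 2^23 * sizeof (void *) == 64 MiB of table memory, matching the
* "1MB per GB" rule of thumb in the comment inside buf_init().
*/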
#define ARC_MINTIME (hz>>4) /* 62 ms */
/*
* This is the size that the buf occupies in memory. If the buf is compressed,
* it will correspond to the compressed size. You should use this method of
* getting the buf size unless you explicitly need the logical size.
*/
uint64_t
arc_buf_size(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
}
uint64_t
arc_buf_lsize(arc_buf_t *buf)
{
return (HDR_GET_LSIZE(buf->b_hdr));
}
/*
* This function will return B_TRUE if the buffer is encrypted in memory.
* This buffer can be decrypted by calling arc_untransform().
*/
boolean_t
arc_is_encrypted(arc_buf_t *buf)
{
return (ARC_BUF_ENCRYPTED(buf) != 0);
}
/*
* Returns B_TRUE if the buffer represents data that has not had its MAC
* verified yet.
*/
boolean_t
arc_is_unauthenticated(arc_buf_t *buf)
{
return (HDR_NOAUTH(buf->b_hdr) != 0);
}
void
arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
uint8_t *iv, uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_PROTECTED(hdr));
bcopy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
bcopy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
bcopy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
}
/*
* Indicates how this buffer is compressed in memory. If it is not compressed
* the value will be ZIO_COMPRESS_OFF. It can be made normally readable with
* arc_untransform() as long as it is also unencrypted.
*/
enum zio_compress
arc_get_compression(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
}
/*
* Return the compression algorithm used to store this data in the ARC. If ARC
* compression is enabled or this is an encrypted block, this will be the same
* as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
*/
static inline enum zio_compress
arc_hdr_get_compress(arc_buf_hdr_t *hdr)
{
return (HDR_COMPRESSION_ENABLED(hdr) ?
HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF);
}
uint8_t
arc_get_complevel(arc_buf_t *buf)
{
return (buf->b_hdr->b_complevel);
}
static inline boolean_t
arc_buf_is_shared(arc_buf_t *buf)
{
boolean_t shared = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
IMPLY(shared, ARC_BUF_SHARED(buf));
IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
/*
* It would be nice to assert arc_can_share() too, but the "hdr isn't
* already being shared" requirement prevents us from doing that.
*/
return (shared);
}
/*
* Free the checksum associated with this header. If there is no checksum, this
* is a no-op.
*/
static inline void
arc_cksum_free(arc_buf_hdr_t *hdr)
{
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
hdr->b_l1hdr.b_freeze_cksum = NULL;
}
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* Return true iff at least one of the bufs on hdr is not compressed.
* Encrypted buffers count as compressed.
*/
static boolean_t
arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
{
ASSERT(hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY_OR_LOCKED(hdr));
for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
if (!ARC_BUF_COMPRESSED(b)) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
* matches the checksum that is stored in the hdr. If there is no checksum,
* or if the buf is compressed, this is a no-op.
*/
static void
arc_cksum_verify(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
zio_cksum_t zc;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
panic("buffer modified while frozen!");
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* This function makes the assumption that data stored in the L2ARC
* will be transformed exactly as it is in the main pool. Because of
* this we can verify the checksum against the reading process's bp.
*/
static boolean_t
arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
{
ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
/*
* Block pointers always store the checksum for the logical data.
* If the block pointer has the gang bit set, then the checksum
* it represents is for the reconstituted data and not for an
* individual gang member. The zio pipeline, however, must be able to
* determine the checksum of each of the gang constituents so it
* treats the checksum comparison differently than what we need
* for l2arc blocks. This prevents us from using the
* zio_checksum_error() interface directly. Instead we must call the
* zio_checksum_error_impl() so that we can ensure the checksum is
* generated using the correct checksum algorithm and accounts for the
* logical I/O size and not just a gang fragment.
*/
return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
zio->io_offset, NULL) == 0);
}
/*
* Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
* checksum and attaches it to the buf's hdr so that we can ensure that the buf
* isn't modified later on. If buf is compressed or there is already a checksum
* on the hdr, this is a no-op (we only checksum uncompressed bufs).
*/
static void
arc_cksum_compute(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL || ARC_BUF_COMPRESSED(buf)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(!ARC_BUF_COMPRESSED(buf));
hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
KM_SLEEP);
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
hdr->b_l1hdr.b_freeze_cksum);
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
arc_buf_watch(buf);
}
#ifndef _KERNEL
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch) {
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ | PROT_WRITE));
}
#endif
}
/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch)
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ));
#endif
}
static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t *hdr)
{
arc_buf_contents_t type;
if (HDR_ISTYPE_METADATA(hdr)) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
}
VERIFY3U(hdr->b_type, ==, type);
return (type);
}
boolean_t
arc_is_metadata(arc_buf_t *buf)
{
return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
}
static uint32_t
arc_bufc_to_flags(arc_buf_contents_t type)
{
switch (type) {
case ARC_BUFC_DATA:
/* metadata field is 0 if buffer contains normal data */
return (0);
case ARC_BUFC_METADATA:
return (ARC_FLAG_BUFC_METADATA);
default:
break;
}
panic("undefined ARC buffer type!");
return ((uint32_t)-1);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
arc_cksum_verify(buf);
/*
* Compressed buffers do not manipulate the b_freeze_cksum.
*/
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(buf->b_hdr));
arc_cksum_compute(buf);
}
/*
* The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
* the following functions should be used to ensure that the flags are
* updated in a thread-safe way. When manipulating the flags either
* the hash_lock must be held or the hdr must be undiscoverable. This
* ensures that we're not racing with any other threads when updating
* the flags.
*/
static inline void
arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags |= flags;
}
static inline void
arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags &= ~flags;
}
/*
* Setting the compression bits in the arc_buf_hdr_t's b_flags is
* done in a special way since we have to clear and set bits
* at the same time. Consumers that wish to set the compression bits
* must use this function to ensure that the flags are updated in
* a thread-safe manner.
*/
static void
arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Holes and embedded blocks will always have a psize = 0 so
* we ignore the compression of the blkptr and mark them
* as uncompressed.
*/
if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(HDR_COMPRESSION_ENABLED(hdr));
}
HDR_SET_COMPRESS(hdr, cmp);
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
}
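/*
 * Illustrative, standalone sketch (compile separately; not part of arc.c):
 * updating a multi-bit field packed inside a flags word means clearing the
 * old bits before OR-ing in the new value, which is why the header uses a
 * dedicated setter instead of the plain set/clear flag helpers. The field
 * layout below (8 bits at bit 24) is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define	EX_COMPRESS_SHIFT	24
#define	EX_COMPRESS_MASK	(0xffu << EX_COMPRESS_SHIFT)

static void
ex_set_compress(uint32_t *flags, uint32_t cmp)
{
	*flags = (*flags & ~EX_COMPRESS_MASK) |
	    ((cmp << EX_COMPRESS_SHIFT) & EX_COMPRESS_MASK);
}

static uint32_t
ex_get_compress(uint32_t flags)
{
	return ((flags & EX_COMPRESS_MASK) >> EX_COMPRESS_SHIFT);
}

int
main(void)
{
	uint32_t flags = 0x1;		/* unrelated flag bits must survive */

	ex_set_compress(&flags, 7);
	ex_set_compress(&flags, 3);	/* old value is cleared, not OR-ed in */
	printf("compress=%u other=0x%x\n",
	    (unsigned)ex_get_compress(flags),
	    (unsigned)(flags & ~EX_COMPRESS_MASK));	/* compress=3 other=0x1 */
	return (0);
}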
/*
* Looks for another buf on the same hdr which has the data decompressed, copies
* from it, and returns true. If no such buf exists, returns false.
*/
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t copied = B_FALSE;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(!ARC_BUF_COMPRESSED(buf));
for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
from = from->b_next) {
/* can't use our own data buffer */
if (from == buf) {
continue;
}
if (!ARC_BUF_COMPRESSED(from)) {
bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
copied = B_TRUE;
break;
}
}
/*
* There were no decompressed bufs, so there should not be a
* checksum on the hdr either.
*/
if (zfs_flags & ZFS_DEBUG_MODIFY)
EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
return (copied);
}
/*
* Allocates an ARC buf header that's in an evicted & L2-cached state.
* This is used during l2arc reconstruction to make empty ARC buffers
* which circumvent the regular disk->arc->l2arc path and instead come
* into being in the reverse order, i.e. l2arc->arc.
*/
static arc_buf_hdr_t *
arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
dva_t dva, uint64_t daddr, int32_t psize, uint64_t birth,
enum zio_compress compress, uint8_t complevel, boolean_t protected,
boolean_t prefetch, arc_state_type_t arcs_state)
{
arc_buf_hdr_t *hdr;
ASSERT(size != 0);
hdr = kmem_cache_alloc(hdr_l2only_cache, KM_SLEEP);
hdr->b_birth = birth;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR);
HDR_SET_LSIZE(hdr, size);
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
if (prefetch)
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa);
hdr->b_dva = dva;
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_daddr = daddr;
hdr->b_l2hdr.b_arcs_state = arcs_state;
return (hdr);
}
/*
* Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
*/
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
uint64_t size;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
HDR_GET_PSIZE(hdr) > 0) {
size = HDR_GET_PSIZE(hdr);
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
size = HDR_GET_LSIZE(hdr);
}
return (size);
}
static int
arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
{
int ret;
uint64_t csize;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
void *tmpbuf = NULL;
abd_t *abd = hdr->b_l1hdr.b_pabd;
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_AUTHENTICATED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* The MAC is calculated on the compressed data that is stored on disk.
* However, if compressed arc is disabled we will only have the
* decompressed data available to us now. Compress it into a temporary
* abd so we can verify the MAC. The performance overhead of this will
* be relatively low, since most objects in an encrypted objset will
* be encrypted (instead of authenticated) anyway.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
tmpbuf = zio_buf_alloc(lsize);
abd = abd_get_from_buf(tmpbuf, lsize);
abd_take_ownership_of_buf(abd, B_TRUE);
csize = zio_compress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmpbuf, lsize, hdr->b_complevel);
ASSERT3U(csize, <=, psize);
abd_zero_off(abd, csize, psize - csize);
}
/*
* Authentication is best effort. We authenticate whenever the key is
* available. If we succeed we clear ARC_FLAG_NOAUTH.
*/
if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
ASSERT3U(lsize, ==, psize);
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd,
psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
} else {
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize,
hdr->b_crypt_hdr.b_mac);
}
if (ret == 0)
arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH);
else if (ret != ENOENT)
goto error;
if (tmpbuf != NULL)
abd_free(abd);
return (0);
error:
if (tmpbuf != NULL)
abd_free(abd);
return (ret);
}
/*
* This function will take a header that only has raw encrypted data in
* b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in
* b_l1hdr.b_pabd. If designated in the header flags, this function will
* also decompress the data.
*/
static int
arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
{
int ret;
abd_t *cabd = NULL;
void *tmp = NULL;
boolean_t no_crypt = B_FALSE;
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_ENCRYPTED(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd,
hdr->b_crypt_hdr.b_rabd, &no_crypt);
if (ret != 0)
goto error;
if (no_crypt) {
abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
}
/*
* If this header has disabled arc compression but the b_pabd is
* compressed after decrypting it, we need to decompress the newly
* decrypted data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
/*
* We want to make sure that we are correctly honoring the
* zfs_abd_scatter_enabled setting, so we allocate an abd here
* and then loan a buffer from it, rather than allocating a
* linear buffer and wrapping it in an abd later.
*/
cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, B_TRUE);
tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf(cabd, tmp, arc_hdr_size(hdr));
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
}
return (0);
error:
arc_hdr_free_abd(hdr, B_FALSE);
if (cabd != NULL)
arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr);
return (ret);
}
/*
* This function is called during arc_buf_fill() to prepare the header's
* abd plaintext pointer for use. This involves authenticating protected
* data and decrypting encrypted data into the plaintext abd.
*/
static int
arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa,
const zbookmark_phys_t *zb, boolean_t noauth)
{
int ret;
ASSERT(HDR_PROTECTED(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
if (HDR_NOAUTH(hdr) && !noauth) {
/*
* The caller requested authenticated data but our data has
* not been authenticated yet. Verify the MAC now if we can.
*/
ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset);
if (ret != 0)
goto error;
} else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
/*
* If we only have the encrypted version of the data, but the
* unencrypted version was requested we take this opportunity
* to store the decrypted version in the header for future use.
*/
ret = arc_hdr_decrypt(hdr, spa, zb);
if (ret != 0)
goto error;
}
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (0);
error:
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (ret);
}
/*
* This function is used by the dbuf code to decrypt bonus buffers in place.
* The dbuf code itself doesn't have any locking for decrypting a shared dnode
* block, so we use the hash lock here to protect against concurrent calls to
* arc_buf_fill().
*/
static void
arc_buf_untransform_in_place(arc_buf_t *buf, kmutex_t *hash_lock)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_ENCRYPTED(hdr));
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
hdr->b_crypt_hdr.b_ebufcnt -= 1;
}
/*
* Given a buf that has a data buffer attached to it, this function will
* efficiently fill the buf with data of the specified compression setting from
* the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
* are already sharing a data buf, no copy is performed.
*
* If the buf is marked as compressed but uncompressed data was requested, this
* will allocate a new data buffer for the buf, remove that flag, and fill the
* buf with uncompressed data. You can't request a compressed buf on a hdr with
* uncompressed data, and (since we haven't added support for it yet) if you
* want compressed data your buf must already be marked as compressed and have
* the correct-sized data buffer.
*/
static int
arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
arc_fill_flags_t flags)
{
int error = 0;
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t hdr_compressed =
(arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0;
boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0;
dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr);
ASSERT3P(buf->b_data, !=, NULL);
IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf));
IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, HDR_ENCRYPTED(hdr));
IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf));
IMPLY(encrypted, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, !ARC_BUF_SHARED(buf));
/*
* If the caller wanted encrypted data we just need to copy it from
* b_rabd and potentially byteswap it. We won't be able to do any
* further transforms on it.
*/
if (encrypted) {
ASSERT(HDR_HAS_RABD(hdr));
abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
goto byteswap;
}
/*
* Adjust encrypted and authenticated headers to accommodate
* the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are
* allowed to fail decryption due to keys not being loaded
* without being marked as an IO error.
*/
if (HDR_PROTECTED(hdr)) {
error = arc_fill_hdr_crypt(hdr, hash_lock, spa,
zb, !!(flags & ARC_FILL_NOAUTH));
if (error == EACCES && (flags & ARC_FILL_IN_PLACE) != 0) {
return (error);
} else if (error != 0) {
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (error);
}
}
/*
* There is a special case here for dnode blocks which are
* decrypting their bonus buffers. These blocks may request to
* be decrypted in-place. This is necessary because there may
* be many dnodes pointing into this buffer and there is
* currently no method to synchronize replacing the backing
* b_data buffer and updating all of the pointers. Here we use
* the hash lock to ensure there are no races. If the need
* arises for other types to be decrypted in-place, they must
* add handling here as well.
*/
if ((flags & ARC_FILL_IN_PLACE) != 0) {
ASSERT(!hdr_compressed);
ASSERT(!compressed);
ASSERT(!encrypted);
if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_buf_untransform_in_place(buf, hash_lock);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
}
return (0);
}
if (hdr_compressed == compressed) {
if (!arc_buf_is_shared(buf)) {
abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
arc_buf_size(buf));
}
} else {
ASSERT(hdr_compressed);
ASSERT(!compressed);
ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr));
/*
* If the buf is sharing its data with the hdr, unlink it and
* allocate a new data buffer for the buf.
*/
if (arc_buf_is_shared(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
/* We need to give the buf its own b_data */
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
/* Previously overhead was 0; just add new overhead */
ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
} else if (ARC_BUF_COMPRESSED(buf)) {
/* We need to reallocate the buf's b_data */
arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
buf);
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
/* We increased the size of b_data; update overhead */
ARCSTAT_INCR(arcstat_overhead_size,
HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
}
/*
* Regardless of the buf's previous compression settings, it
* should not be compressed at the end of this function.
*/
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
/*
* Try copying the data from another buf which already has a
* decompressed version. If that's not possible, it's time to
* bite the bullet and decompress the data from the hdr.
*/
if (arc_buf_try_copy_decompressed_data(buf)) {
/* Skip byteswapping and checksumming (already done) */
return (0);
} else {
error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, buf->b_data,
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr),
&hdr->b_complevel);
/*
* Absent hardware errors or software bugs, this should
* be impossible, but log it anyway so we can debug it.
*/
if (error != 0) {
zfs_dbgmsg(
"hdr %px, compress %d, psize %d, lsize %d",
hdr, arc_hdr_get_compress(hdr),
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (SET_ERROR(EIO));
}
}
}
byteswap:
/* Byteswap the buf's data if necessary */
if (bswap != DMU_BSWAP_NUMFUNCS) {
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
}
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
return (0);
}
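/*
 * Illustrative, standalone sketch (compile separately; not part of arc.c):
 * the byteswap step above dispatches through a per-type function table,
 * with DMU_BSWAP_NUMFUNCS doubling as the "no byteswap needed" sentinel.
 * The table and names below are hypothetical; only a 64-bit swap is shown,
 * using the GCC/Clang __builtin_bswap64() builtin.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*ex_bswap_fn)(void *buf, size_t size);

static void
ex_bswap_uint64_array(void *buf, size_t size)
{
	uint64_t *p = buf;
	for (size_t i = 0; i < size / sizeof (uint64_t); i++)
		p[i] = __builtin_bswap64(p[i]);
}

enum { EX_BSWAP_UINT64 = 0, EX_BSWAP_NUMFUNCS };

static const ex_bswap_fn ex_bswap[EX_BSWAP_NUMFUNCS] = {
	[EX_BSWAP_UINT64] = ex_bswap_uint64_array,
};

int
main(void)
{
	uint64_t data[2] = { 0x0102030405060708ULL, 0x1122334455667788ULL };
	unsigned idx = EX_BSWAP_UINT64;

	/* EX_BSWAP_NUMFUNCS would mean "data is already in host order". */
	if (idx != EX_BSWAP_NUMFUNCS)
		ex_bswap[idx](data, sizeof (data));
	printf("%016llx\n", (unsigned long long)data[0]);	/* 0807060504030201 */
	return (0);
}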
/*
* If this function is being called to decrypt an encrypted buffer or verify an
* authenticated one, the key must be loaded and a mapping must be made
* available in the keystore via spa_keystore_create_mapping() or one of its
* callers.
*/
int
arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
boolean_t in_place)
{
int ret;
arc_fill_flags_t flags = 0;
if (in_place)
flags |= ARC_FILL_IN_PLACE;
ret = arc_buf_fill(buf, spa, zb, flags);
if (ret == ECKSUM) {
/*
* Convert authentication and decryption errors to EIO
* (and generate an ereport) before leaving the ARC.
*/
ret = SET_ERROR(EIO);
spa_log_error(spa, zb);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
return (ret);
}
/*
* Increment the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Decrement the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Add a reference to this hdr indicating that someone is actively
* referencing that memory. When the refcount transitions from 0 to 1,
* we remove it from the respective arc_state_t list to indicate that
* it is not evictable.
*/
static void
add_reference(arc_buf_hdr_t *hdr, void *tag)
{
arc_state_t *state;
ASSERT(HDR_HAS_L1HDR(hdr));
if (!HDR_EMPTY(hdr) && !MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
}
state = hdr->b_l1hdr.b_state;
if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
(state != arc_anon)) {
/* We don't use the L2-only state list. */
if (state != arc_l2c_only) {
multilist_remove(&state->arcs_list[arc_buf_type(hdr)],
hdr);
arc_evictable_space_decrement(hdr, state);
}
/* remove the prefetch flag if we get a reference */
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
}
/*
* Remove a reference from this hdr. When the reference transitions from
* 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
* list making it eligible for eviction.
*/
static int
remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
{
int cnt;
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
ASSERT(!GHOST_STATE(state));
/*
* arc_l2c_only counts as a ghost state so we don't need to explicitly
* check to prevent usage of the arc_l2c_only list.
*/
if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
(state != arc_anon)) {
multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
arc_evictable_space_increment(hdr, state);
}
return (cnt);
}
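/*
 * Illustrative, standalone sketch (compile separately; not part of arc.c):
 * the add/remove reference pair above maintains a simple invariant -- a
 * header sits on an evictable list exactly when its refcount is zero.
 * The ex_hdr type and helpers are hypothetical; the boolean stands in for
 * membership on an arc_state_t multilist.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct ex_hdr {
	int refcnt;
	bool evictable;
};

static void
ex_add_reference(struct ex_hdr *h)
{
	if (++h->refcnt == 1)
		h->evictable = false;	/* 0 -> 1: no longer evictable */
}

static void
ex_remove_reference(struct ex_hdr *h)
{
	assert(h->refcnt > 0);
	if (--h->refcnt == 0)
		h->evictable = true;	/* 1 -> 0: eligible for eviction */
}

int
main(void)
{
	struct ex_hdr h = { .refcnt = 0, .evictable = true };

	ex_add_reference(&h);
	ex_add_reference(&h);
	printf("held: evictable=%d\n", h.evictable);	/* 0 */
	ex_remove_reference(&h);
	ex_remove_reference(&h);
	printf("idle: evictable=%d\n", h.evictable);	/* 1 */
	return (0);
}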
/*
* Returns detailed information about a specific arc buffer. When the
* state_index argument is set the function will calculate the arc header
* list position for its arc state. Since this requires a linear traversal
* callers are strongly encouraged not to do this. However, it can be helpful
* for targeted analysis so the functionality is provided.
*/
void
arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
{
arc_buf_hdr_t *hdr = ab->b_hdr;
l1arc_buf_hdr_t *l1hdr = NULL;
l2arc_buf_hdr_t *l2hdr = NULL;
arc_state_t *state = NULL;
memset(abi, 0, sizeof (arc_buf_info_t));
if (hdr == NULL)
return;
abi->abi_flags = hdr->b_flags;
if (HDR_HAS_L1HDR(hdr)) {
l1hdr = &hdr->b_l1hdr;
state = l1hdr->b_state;
}
if (HDR_HAS_L2HDR(hdr))
l2hdr = &hdr->b_l2hdr;
if (l1hdr) {
abi->abi_bufcnt = l1hdr->b_bufcnt;
abi->abi_access = l1hdr->b_arc_access;
abi->abi_mru_hits = l1hdr->b_mru_hits;
abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
abi->abi_mfu_hits = l1hdr->b_mfu_hits;
abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
}
if (l2hdr) {
abi->abi_l2arc_dattr = l2hdr->b_daddr;
abi->abi_l2arc_hits = l2hdr->b_hits;
}
abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
abi->abi_state_contents = arc_buf_type(hdr);
abi->abi_size = arc_hdr_size(hdr);
}
/*
* Move the supplied buffer to the indicated state. The hash lock
* for the buffer must be held by the caller.
*/
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
kmutex_t *hash_lock)
{
arc_state_t *old_state;
int64_t refcnt;
uint32_t bufcnt;
boolean_t update_old, update_new;
arc_buf_contents_t buftype = arc_buf_type(hdr);
/*
* We almost always have an L1 hdr here, since we call arc_hdr_realloc()
* in arc_read() when bringing a buffer out of the L2ARC. However, the
* L1 hdr doesn't always exist when we change state to arc_anon before
* destroying a header, in which case reallocating to add the L1 hdr is
* pointless.
*/
if (HDR_HAS_L1HDR(hdr)) {
old_state = hdr->b_l1hdr.b_state;
refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
bufcnt = hdr->b_l1hdr.b_bufcnt;
update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
} else {
old_state = arc_l2c_only;
refcnt = 0;
bufcnt = 0;
update_old = B_FALSE;
}
update_new = update_old;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT3P(new_state, !=, old_state);
ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
ASSERT(old_state != arc_anon || bufcnt <= 1);
/*
* If this buffer is evictable, transfer it from the
* old state list to the new state list.
*/
if (refcnt == 0) {
if (old_state != arc_anon && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_remove(&old_state->arcs_list[buftype], hdr);
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
update_old = B_TRUE;
}
arc_evictable_space_decrement(hdr, old_state);
}
if (new_state != arc_anon && new_state != arc_l2c_only) {
/*
* An L1 header always exists here, since if we're
* moving to some L1-cached state (i.e. not l2c_only or
* anonymous), we realloc the header to add an L1hdr
* beforehand.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_insert(&new_state->arcs_list[buftype], hdr);
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
update_new = B_TRUE;
}
arc_evictable_space_increment(hdr, new_state);
}
}
ASSERT(!HDR_EMPTY(hdr));
if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
/* adjust state sizes (ignore arc_l2c_only) */
if (update_new && new_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
/*
* When moving a header to a ghost state, we first
* remove all arc buffers. Thus, we'll have a
* bufcnt of zero, and no arc buffer to use for
* the reference. As a result, we use the arc
* header pointer for the reference.
*/
(void) zfs_refcount_add_many(&new_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_add_many(
&new_state->arcs_size,
arc_buf_size(buf), buf);
}
ASSERT3U(bufcnt, ==, buffers);
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(
&new_state->arcs_size,
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(
&new_state->arcs_size,
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (update_old && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* When moving a header off of a ghost state,
* the header will not contain any arc buffers.
* We use the arc header pointer for the reference
* which is exactly what we did when we put the
* header on the ghost state.
*/
(void) zfs_refcount_remove_many(&old_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_remove_many(
&old_state->arcs_size, arc_buf_size(buf),
buf);
}
ASSERT3U(bufcnt, ==, buffers);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size, arc_hdr_size(hdr),
hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size, HDR_GET_PSIZE(hdr),
hdr);
}
}
}
if (HDR_HAS_L1HDR(hdr)) {
hdr->b_l1hdr.b_state = new_state;
if (HDR_HAS_L2HDR(hdr) && new_state != arc_l2c_only) {
l2arc_hdr_arcstats_decrement_state(hdr);
hdr->b_l2hdr.b_arcs_state = new_state->arcs_state;
l2arc_hdr_arcstats_increment_state(hdr);
}
}
/*
* L2 headers should never be on the L2 state list since they don't
* have L1 headers allocated.
*/
ASSERT(multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
}
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, space);
break;
case ARC_SPACE_DNODE:
aggsum_add(&arc_sums.arcstat_dnode_size, space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
/*
* Note: this includes space wasted by all scatter ABD's, not
* just those allocated by the ARC. But the vast majority of
* scatter ABD's come from the ARC, because other users are
* very short-lived.
*/
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
aggsum_add(&arc_sums.arcstat_meta_used, space);
aggsum_add(&arc_sums.arcstat_size, space);
}
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, -space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, -space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, -space);
break;
case ARC_SPACE_DNODE:
aggsum_add(&arc_sums.arcstat_dnode_size, -space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, -space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, -space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE) {
ASSERT(aggsum_compare(&arc_sums.arcstat_meta_used,
space) >= 0);
ARCSTAT_MAX(arcstat_meta_max,
aggsum_upper_bound(&arc_sums.arcstat_meta_used));
aggsum_add(&arc_sums.arcstat_meta_used, -space);
}
ASSERT(aggsum_compare(&arc_sums.arcstat_size, space) >= 0);
aggsum_add(&arc_sums.arcstat_size, -space);
}
/*
* Given a hdr and a buf, returns whether that buf can share its b_data buffer
* with the hdr's b_pabd.
*/
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
/*
* The criteria for sharing a hdr's data are:
* 1. the buffer is not encrypted
* 2. the hdr's compression matches the buf's compression
* 3. the hdr doesn't need to be byteswapped
* 4. the hdr isn't already being shared
* 5. the buf is either compressed or it is the last buf in the hdr list
*
* Criterion #5 maintains the invariant that shared uncompressed
* bufs must be the final buf in the hdr's b_buf list. Reading this, you
* might ask, "if a compressed buf is allocated first, won't that be the
* last thing in the list?", but in that case it's impossible to create
* a shared uncompressed buf anyway (because the hdr must be compressed
* to have the compressed buf). You might also think that #3 is
* sufficient to make this guarantee, however it's possible
* (specifically in the rare L2ARC write race mentioned in
* arc_buf_alloc_impl()) there will be an existing uncompressed buf that
* is shareable, but wasn't at the time of its allocation. Rather than
* allow a new shared uncompressed buf to be created and then shuffle
* the list around to make it the last element, this simply disallows
* sharing if the new buf isn't the first to be added.
*/
ASSERT3P(buf->b_hdr, ==, hdr);
boolean_t hdr_compressed =
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
return (!ARC_BUF_ENCRYPTED(buf) &&
buf_compressed == hdr_compressed &&
hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
!HDR_SHARED_DATA(hdr) &&
(ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
/*
* Allocate a buf for this hdr. If you care about the data that's in the hdr,
* or if you want a compressed buffer, pass those flags in. Returns 0 if the
* copy was made successfully, or an error code otherwise.
*/
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
void *tag, boolean_t encrypted, boolean_t compressed, boolean_t noauth,
boolean_t fill, arc_buf_t **ret)
{
arc_buf_t *buf;
arc_fill_flags_t flags = ARC_FILL_LOCKED;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
VERIFY(hdr->b_type == ARC_BUFC_DATA ||
hdr->b_type == ARC_BUFC_METADATA);
ASSERT3P(ret, !=, NULL);
ASSERT3P(*ret, ==, NULL);
IMPLY(encrypted, compressed);
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_l2_hits = 0;
buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
buf->b_hdr = hdr;
buf->b_data = NULL;
buf->b_next = hdr->b_l1hdr.b_buf;
buf->b_flags = 0;
add_reference(hdr, tag);
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Only honor requests for compressed bufs if the hdr is actually
* compressed. This must be overridden if the buffer is encrypted since
* encrypted buffers cannot be decompressed.
*/
if (encrypted) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
} else if (compressed &&
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
flags |= ARC_FILL_COMPRESSED;
}
if (noauth) {
ASSERT0(encrypted);
flags |= ARC_FILL_NOAUTH;
}
/*
* If the hdr's data can be shared then we share the data buffer and
* set the appropriate bit in the hdr's b_flags to indicate the hdr is
* sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
* buffer to store the buf's data.
*
* There are two additional restrictions here because we're sharing
* hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
* actively involved in an L2ARC write, because if this buf is used by
* an arc_write() then the hdr's data buffer will be released when the
* write completes, even though the L2ARC write might still be using it.
* Second, the hdr's ABD must be linear so that the buf's user doesn't
* need to be ABD-aware. It must be allocated via
* zio_[data_]buf_alloc(), not as a page, because we need to be able
* to abd_release_ownership_of_buf(), which isn't allowed on "linear
* page" buffers because the ABD code needs to handle freeing them
* specially.
*/
boolean_t can_share = arc_can_share(hdr, buf) &&
!HDR_L2_WRITING(hdr) &&
hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(hdr->b_l1hdr.b_pabd) &&
!abd_is_linear_page(hdr->b_l1hdr.b_pabd);
/* Set up b_data and sharing */
if (can_share) {
buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
buf->b_data =
arc_get_data_buf(hdr, arc_buf_size(buf), buf);
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
VERIFY3P(buf->b_data, !=, NULL);
hdr->b_l1hdr.b_buf = buf;
hdr->b_l1hdr.b_bufcnt += 1;
if (encrypted)
hdr->b_crypt_hdr.b_ebufcnt += 1;
/*
* If the user wants the data from the hdr, we need to either copy or
* decompress the data.
*/
if (fill) {
ASSERT3P(zb, !=, NULL);
return (arc_buf_fill(buf, spa, zb, flags));
}
return (0);
}
static char *arc_onloan_tag = "onloan";
static inline void
arc_loaned_bytes_update(int64_t delta)
{
atomic_add_64(&arc_loaned_bytes, delta);
/* assert that it did not wrap around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}
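/*
 * Illustrative, standalone sketch (compile separately; not part of arc.c):
 * applying a signed delta to a counter that must never go negative, as the
 * loaned-bytes helper above does, here with C11 atomics standing in for
 * atomic_add_64()/atomic_add_64_nv(). Names are hypothetical.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t ex_loaned_bytes;

static void
ex_loaned_bytes_update(int64_t delta)
{
	int64_t now = atomic_fetch_add(&ex_loaned_bytes, delta) + delta;

	/* More bytes must never be "returned" than were loaned out. */
	assert(now >= 0);
}

int
main(void)
{
	ex_loaned_bytes_update(4096);	/* loan a buffer out */
	ex_loaned_bytes_update(-4096);	/* return it */
	printf("outstanding: %lld\n",
	    (long long)atomic_load(&ex_loaned_bytes));	/* 0 */
	return (0);
}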
/*
* Loan out an anonymous arc buffer. Loaned buffers are not counted as in
* flight data by arc_tempreserve_space() until they are "returned". Loaned
* buffers must be returned to the arc before they can be used by the DMU or
* freed.
*/
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
psize, lsize, compression_type, complevel);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
byteorder, salt, iv, mac, ot, psize, lsize, compression_type,
complevel);
atomic_add_64(&arc_loaned_bytes, psize);
return (buf);
}
/*
* Return a loaned arc buffer to the arc.
*/
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
arc_loaned_bytes_update(-arc_buf_size(buf));
}
/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
arc_loaned_bytes_update(arc_buf_size(buf));
}
static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
df->l2df_abd = abd;
df->l2df_size = size;
df->l2df_type = type;
mutex_enter(&l2arc_free_on_write_mtx);
list_insert_head(l2arc_free_on_write, df);
mutex_exit(&l2arc_free_on_write_mtx);
}
static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, hdr);
}
(void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
if (free_rdata) {
l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
} else {
l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
}
}
/*
* Share the arc_buf_t's data with the hdr. Whenever we are sharing the
* data buffer, we transfer the refcount ownership to the hdr and update
* the appropriate kstats.
*/
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_can_share(hdr, buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Start sharing the data buffer. We transfer the
* refcount ownership to the hdr since it always owns
* the refcount whenever an arc_buf_t is shared.
*/
zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size,
arc_hdr_size(hdr), buf, hdr);
hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
HDR_ISTYPE_METADATA(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
/*
* Since we've transferred ownership to the hdr we need
* to increment its compressed and uncompressed kstats and
* decrement the overhead size.
*/
ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}
static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* We are no longer sharing this buffer so we need
* to transfer its ownership to the rightful owner.
*/
zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size,
arc_hdr_size(hdr), hdr, buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
abd_free(hdr->b_l1hdr.b_pabd);
hdr->b_l1hdr.b_pabd = NULL;
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
/*
* Since the buffer is no longer shared between
* the arc buf and the hdr, count it as overhead.
*/
ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
/*
* Remove an arc_buf_t from the hdr's buf list and return the last
* arc_buf_t on the list. If no buffers remain on the list then return
* NULL.
*/
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
arc_buf_t *lastbuf = NULL;
/*
* Remove the buf from the hdr list and locate the last
* remaining buffer on the list.
*/
while (*bufp != NULL) {
if (*bufp == buf)
*bufp = buf->b_next;
/*
* If we've removed a buffer in the middle of
* the list then update the lastbuf and update
* bufp.
*/
if (*bufp != NULL) {
lastbuf = *bufp;
bufp = &(*bufp)->b_next;
}
}
buf->b_next = NULL;
ASSERT3P(lastbuf, !=, buf);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
return (lastbuf);
}
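/*
 * Illustrative, standalone sketch (compile separately; not part of arc.c):
 * the pointer-to-pointer walk above unlinks the target and remembers the
 * last surviving node in a single pass. The ex_node type is hypothetical.
 */
#include <stdio.h>

struct ex_node {
	int id;
	struct ex_node *next;
};

static struct ex_node *
ex_remove(struct ex_node **head, struct ex_node *target)
{
	struct ex_node **np = head;
	struct ex_node *last = NULL;

	while (*np != NULL) {
		if (*np == target)
			*np = target->next;	/* unlink in place */
		if (*np != NULL) {		/* advance and track the tail */
			last = *np;
			np = &(*np)->next;
		}
	}
	target->next = NULL;
	return (last);				/* NULL if the list is now empty */
}

int
main(void)
{
	struct ex_node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct ex_node *head = &a;

	struct ex_node *last = ex_remove(&head, &b);
	printf("head=%d last=%d\n", head->id, last->id);	/* head=1 last=3 */
	return (0);
}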
/*
* Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
* list and free it.
*/
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Free up the data associated with the buf but only if we're not
* sharing this with the hdr. If we are sharing it with the hdr, the
* hdr is responsible for doing the free.
*/
if (buf->b_data != NULL) {
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
if (arc_buf_is_shared(buf)) {
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
uint64_t size = arc_buf_size(buf);
arc_free_data_buf(hdr, buf->b_data, size, buf);
ARCSTAT_INCR(arcstat_overhead_size, -size);
}
buf->b_data = NULL;
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf)) {
hdr->b_crypt_hdr.b_ebufcnt -= 1;
/*
* If we have no more encrypted buffers and we've
* already gotten a copy of the decrypted data we can
* free b_rabd to save some space.
*/
if (hdr->b_crypt_hdr.b_ebufcnt == 0 &&
HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd != NULL &&
!HDR_IO_IN_PROGRESS(hdr)) {
arc_hdr_free_abd(hdr, B_TRUE);
}
}
}
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
/*
* If the current arc_buf_t is sharing its data buffer with the
* hdr, then reassign the hdr's b_pabd to share it with the new
* buffer at the end of the list. The shared buffer is always
* the last one on the hdr's buffer list.
*
* There is an equivalent case for compressed bufs, but since
* they aren't guaranteed to be the last buf in the list and
* that is an exceedingly rare case, we just allow that space to be
* wasted temporarily. We must also be careful not to share
* encrypted buffers, since they cannot be shared.
*/
if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
/* Only one buf can be shared at once */
VERIFY(!arc_buf_is_shared(lastbuf));
/* hdr is uncompressed so can't have compressed buf */
VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
arc_hdr_free_abd(hdr, B_FALSE);
/*
* We must set up a new shared block between the
* last buffer and the hdr. The data would have
* been allocated by the arc buf so we need to transfer
* ownership to the hdr since it's now being shared.
*/
arc_share_buf(hdr, lastbuf);
}
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT3P(lastbuf, !=, NULL);
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
}
/*
* Free the checksum if we're removing the last uncompressed buf from
* this hdr.
*/
if (!arc_hdr_has_uncompressed_buf(hdr)) {
arc_cksum_free(hdr);
}
/* clean up the buf */
buf->b_hdr = NULL;
kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
{
uint64_t size;
boolean_t alloc_rdata = ((alloc_flags & ARC_HDR_ALLOC_RDATA) != 0);
boolean_t do_adapt = ((alloc_flags & ARC_HDR_DO_ADAPT) != 0);
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
IMPLY(alloc_rdata, HDR_PROTECTED(hdr));
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
do_adapt);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
do_adapt);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
}
ARCSTAT_INCR(arcstat_compressed_size, size);
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}
static void
arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
IMPLY(free_rdata, HDR_HAS_RABD(hdr));
/*
* If the hdr is currently being written to the l2arc then
* we defer freeing the data by adding it to the l2arc_free_on_write
* list. The l2arc will free the data once it's finished
* writing it to the l2arc device.
*/
if (HDR_L2_WRITING(hdr)) {
arc_hdr_free_on_write(hdr, free_rdata);
ARCSTAT_BUMP(arcstat_l2_free_on_write);
} else if (free_rdata) {
arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
} else {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
}
if (free_rdata) {
hdr->b_crypt_hdr.b_rabd = NULL;
ARCSTAT_INCR(arcstat_raw_size, -size);
} else {
hdr->b_l1hdr.b_pabd = NULL;
}
if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
ARCSTAT_INCR(arcstat_compressed_size, -size);
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
boolean_t protected, enum zio_compress compression_type, uint8_t complevel,
arc_buf_contents_t type, boolean_t alloc_rdata)
{
arc_buf_hdr_t *hdr;
int flags = ARC_HDR_DO_ADAPT;
VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
if (protected) {
hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE);
} else {
hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
}
flags |= alloc_rdata ? ARC_HDR_ALLOC_RDATA : 0;
ASSERT(HDR_EMPTY(hdr));
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
HDR_SET_PSIZE(hdr, psize);
HDR_SET_LSIZE(hdr, lsize);
hdr->b_spa = spa;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
arc_hdr_set_compress(hdr, compression_type);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_l1hdr.b_state = arc_anon;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_buf = NULL;
/*
* Allocate the hdr's buffer. This will contain either
* the compressed or uncompressed data depending on the block
* it references and compressed arc enablement.
*/
arc_hdr_alloc_abd(hdr, flags);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
return (hdr);
}
/*
* Transition between the two allocation states for the arc_buf_hdr struct.
* The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
* (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
* version is used when a cache buffer is only in the L2ARC in order to reduce
* memory usage.
*/
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
ASSERT(HDR_HAS_L2HDR(hdr));
arc_buf_hdr_t *nhdr;
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
(old == hdr_l2only_cache && new == hdr_full_cache));
/*
* If the caller wanted a new full header and the header is to be
* encrypted we will actually allocate the header from the full crypt
* cache instead. The same applies to freeing from the old cache.
*/
if (HDR_PROTECTED(hdr) && new == hdr_full_cache)
new = hdr_full_crypt_cache;
if (HDR_PROTECTED(hdr) && old == hdr_full_cache)
old = hdr_full_crypt_cache;
nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
buf_hash_remove(hdr);
bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
/*
* arc_access and arc_change_state need to be aware that a
* header has just come out of L2ARC, so we set its state to
* l2c_only even though it's about to change.
*/
nhdr->b_l1hdr.b_state = arc_l2c_only;
/* Verify previous threads set to NULL before freeing */
ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
/*
* If we've reached here, we must have been called from
* arc_evict_hdr(), as such we should have already been
* removed from any ghost list we were previously on
* (which protects us from racing with arc_evict_state),
* thus no locking is needed during this check.
*/
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
/*
* A buffer must not be moved into the arc_l2c_only
* state if it's not finished being written out to the
* l2arc device. Otherwise, the b_l1hdr.b_pabd field
* might be accessed, even though it was removed.
*/
VERIFY(!HDR_L2_WRITING(hdr));
VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
}
/*
* The header has been reallocated so we need to re-insert it into any
* lists it was on.
*/
(void) buf_hash_insert(nhdr, NULL);
ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
mutex_enter(&dev->l2ad_mtx);
/*
* We must place the realloc'ed header back into the list at
* the same spot. Otherwise, if it's placed earlier in the list,
* l2arc_write_buffers() could find it during the function's
* write phase, and try to write it out to the l2arc.
*/
list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
list_remove(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
/*
* Since we're using the pointer address as the tag when
* incrementing and decrementing the l2ad_alloc refcount, we
* must remove the old pointer (that we're about to destroy) and
* add the new pointer to the refcount. Otherwise we'd remove
* the wrong pointer address when calling arc_hdr_destroy() later.
*/
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(nhdr), nhdr);
buf_discard_identity(hdr);
kmem_cache_free(old, hdr);
return (nhdr);
}
/*
* This function allows an L1 header to be reallocated as a crypt
* header and vice versa. If we are going to a crypt header, the
* new fields will be zeroed out.
*/
static arc_buf_hdr_t *
arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
{
arc_buf_hdr_t *nhdr;
arc_buf_t *buf;
kmem_cache_t *ncache, *ocache;
/*
* This function requires that hdr is in the arc_anon state.
* Therefore it won't have any L2ARC data for us to worry
* about copying.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT3U(!!HDR_PROTECTED(hdr), !=, need_crypt);
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!list_link_active(&hdr->b_l2hdr.b_l2node));
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (need_crypt) {
ncache = hdr_full_crypt_cache;
ocache = hdr_full_cache;
} else {
ncache = hdr_full_cache;
ocache = hdr_full_crypt_cache;
}
nhdr = kmem_cache_alloc(ncache, KM_PUSHPAGE);
/*
* Copy all members that aren't locks or condvars to the new header.
* No lists are pointing to us (as we asserted above), so we don't
* need to worry about the list nodes.
*/
nhdr->b_dva = hdr->b_dva;
nhdr->b_birth = hdr->b_birth;
nhdr->b_type = hdr->b_type;
nhdr->b_flags = hdr->b_flags;
nhdr->b_psize = hdr->b_psize;
nhdr->b_lsize = hdr->b_lsize;
nhdr->b_spa = hdr->b_spa;
nhdr->b_l1hdr.b_freeze_cksum = hdr->b_l1hdr.b_freeze_cksum;
nhdr->b_l1hdr.b_bufcnt = hdr->b_l1hdr.b_bufcnt;
nhdr->b_l1hdr.b_byteswap = hdr->b_l1hdr.b_byteswap;
nhdr->b_l1hdr.b_state = hdr->b_l1hdr.b_state;
nhdr->b_l1hdr.b_arc_access = hdr->b_l1hdr.b_arc_access;
nhdr->b_l1hdr.b_mru_hits = hdr->b_l1hdr.b_mru_hits;
nhdr->b_l1hdr.b_mru_ghost_hits = hdr->b_l1hdr.b_mru_ghost_hits;
nhdr->b_l1hdr.b_mfu_hits = hdr->b_l1hdr.b_mfu_hits;
nhdr->b_l1hdr.b_mfu_ghost_hits = hdr->b_l1hdr.b_mfu_ghost_hits;
nhdr->b_l1hdr.b_l2_hits = hdr->b_l1hdr.b_l2_hits;
nhdr->b_l1hdr.b_acb = hdr->b_l1hdr.b_acb;
nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;
/*
* This zfs_refcount_add() exists only to ensure that the individual
* arc buffers always point to a header that is referenced, avoiding
* a small race condition that could trigger ASSERTs.
*/
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;
for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
mutex_enter(&buf->b_evict_lock);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
}
zfs_refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
(void) zfs_refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
if (need_crypt) {
arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED);
} else {
arc_hdr_clear_flags(nhdr, ARC_FLAG_PROTECTED);
}
/* unset all members of the original hdr */
bzero(&hdr->b_dva, sizeof (dva_t));
hdr->b_birth = 0;
hdr->b_type = ARC_BUFC_INVALID;
hdr->b_flags = 0;
hdr->b_psize = 0;
hdr->b_lsize = 0;
hdr->b_spa = 0;
hdr->b_l1hdr.b_freeze_cksum = NULL;
hdr->b_l1hdr.b_buf = NULL;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_byteswap = 0;
hdr->b_l1hdr.b_state = NULL;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_l2_hits = 0;
hdr->b_l1hdr.b_acb = NULL;
hdr->b_l1hdr.b_pabd = NULL;
if (ocache == hdr_full_crypt_cache) {
ASSERT(!HDR_HAS_RABD(hdr));
hdr->b_crypt_hdr.b_ot = DMU_OT_NONE;
hdr->b_crypt_hdr.b_ebufcnt = 0;
hdr->b_crypt_hdr.b_dsobj = 0;
bzero(hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
bzero(hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
bzero(hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
}
buf_discard_identity(hdr);
kmem_cache_free(ocache, hdr);
return (nhdr);
}
/*
* This function is used by the send / receive code to convert a newly
* allocated arc_buf_t to one that is suitable for a raw encrypted write. It
* is also used to allow the root objset block to be updated without altering
* its embedded MACs. Both block types will always be uncompressed so we do not
* have to worry about compression type or psize.
*/
void
arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
if (!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, B_TRUE);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
if (salt != NULL)
bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
if (iv != NULL)
bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
if (mac != NULL)
bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
}
/*
* Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
* The buf is returned thawed since we expect the consumer to modify it.
*/
arc_buf_t *
arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
{
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
B_FALSE, ZIO_COMPRESS_OFF, 0, type, B_FALSE);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, B_FALSE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
/*
* Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
* for bufs containing metadata.
*/
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
B_FALSE, compression_type, complevel, ARC_BUFC_DATA, B_FALSE);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE,
B_TRUE, B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
if (!arc_buf_is_shared(buf)) {
/*
* To ensure that the hdr has the correct data in it if we call
* arc_untransform() on this buf before it's been written to
* disk, it's easiest if we just set up sharing between the
* buf and the hdr.
*/
arc_hdr_free_abd(hdr, B_FALSE);
arc_share_buf(hdr, buf);
}
return (buf);
}
arc_buf_t *
arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_hdr_t *hdr;
arc_buf_t *buf;
arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
ARC_BUFC_METADATA : ARC_BUFC_DATA;
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
compression_type, complevel, type, B_TRUE);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
/*
* This buffer will be considered encrypted even if the ot is not an
* encrypted type. It will become authenticated instead in
* arc_write_ready().
*/
buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_TRUE, B_TRUE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
return (buf);
}
static void
l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
arc_buf_contents_t type = hdr->b_type;
int64_t lsize_s;
int64_t psize_s;
int64_t asize_s;
if (incr) {
lsize_s = lsize;
psize_s = psize;
asize_s = asize;
} else {
lsize_s = -lsize;
psize_s = -psize;
asize_s = -asize;
}
/* If the buffer is a prefetch, count it as such. */
if (HDR_PREFETCH(hdr)) {
ARCSTAT_INCR(arcstat_l2_prefetch_asize, asize_s);
} else {
/*
* We use the value stored in the L2 header upon initial
* caching in L2ARC. This value will be updated in case
* an MRU/MRU_ghost buffer transitions to MFU but the L2ARC
* metadata (log entry) cannot currently be updated. Having
* the ARC state in the L2 header solves the problem of a
* possibly absent L1 header (apparent in buffers restored
* from persistent L2ARC).
*/
switch (hdr->b_l2hdr.b_arcs_state) {
case ARC_STATE_MRU_GHOST:
case ARC_STATE_MRU:
ARCSTAT_INCR(arcstat_l2_mru_asize, asize_s);
break;
case ARC_STATE_MFU_GHOST:
case ARC_STATE_MFU:
ARCSTAT_INCR(arcstat_l2_mfu_asize, asize_s);
break;
default:
break;
}
}
if (state_only)
return;
ARCSTAT_INCR(arcstat_l2_psize, psize_s);
ARCSTAT_INCR(arcstat_l2_lsize, lsize_s);
switch (type) {
case ARC_BUFC_DATA:
ARCSTAT_INCR(arcstat_l2_bufc_data_asize, asize_s);
break;
case ARC_BUFC_METADATA:
ARCSTAT_INCR(arcstat_l2_bufc_metadata_asize, asize_s);
break;
default:
break;
}
}
static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
ASSERT(HDR_HAS_L2HDR(hdr));
list_remove(&dev->l2ad_buflist, hdr);
l2arc_hdr_arcstats_decrement(hdr);
vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_buf == NULL ||
hdr->b_l1hdr.b_bufcnt > 0);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
}
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (HDR_HAS_L2HDR(hdr)) {
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
if (!buflist_held)
mutex_enter(&dev->l2ad_mtx);
/*
* Even though we checked this conditional above, we
* need to check this again now that we have the
* l2ad_mtx. This is because we could be racing with
* another thread calling l2arc_evict() which might have
* destroyed this header's L2 portion as we were waiting
* to acquire the l2ad_mtx. If that happens, we don't
* want to re-destroy the header's L2 portion.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
if (!buflist_held)
mutex_exit(&dev->l2ad_mtx);
}
/*
* The header's identity can only be safely discarded once it is no
* longer discoverable. This requires removing it from the hash table
* and the l2arc header list. After this point the hash lock can not
* be used to protect the header.
*/
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
if (HDR_HAS_L1HDR(hdr)) {
arc_cksum_free(hdr);
while (hdr->b_l1hdr.b_buf != NULL)
arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (!HDR_PROTECTED(hdr)) {
kmem_cache_free(hdr_full_cache, hdr);
} else {
kmem_cache_free(hdr_full_crypt_cache, hdr);
}
} else {
kmem_cache_free(hdr_l2only_cache, hdr);
}
}
void
arc_buf_destroy(arc_buf_t *buf, void* tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
VERIFY0(remove_reference(hdr, NULL, tag));
arc_hdr_destroy(hdr);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hdr, ==, buf->b_hdr);
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
ASSERT3P(buf->b_data, !=, NULL);
(void) remove_reference(hdr, hash_lock, tag);
arc_buf_destroy_impl(buf);
mutex_exit(hash_lock);
}
/*
* Evict the arc_buf_hdr that is provided as a parameter. The resultant
* state of the header is dependent on its state prior to entering this
* function. The following transitions are possible:
*
* - arc_mru -> arc_mru_ghost
* - arc_mfu -> arc_mfu_ghost
* - arc_mru_ghost -> arc_l2c_only
* - arc_mru_ghost -> deleted
* - arc_mfu_ghost -> arc_l2c_only
* - arc_mfu_ghost -> deleted
*
* Return total size of evicted data buffers for eviction progress tracking.
* When evicting from ghost states return logical buffer size to make eviction
* progress at the same (or at least comparable) rate as from non-ghost states.
*
* Return *real_evicted for actual ARC size reduction to wake up threads
* waiting for it. For non-ghost states it includes size of evicted data
* buffers (the headers are not freed there). For ghost states it includes
* only the evicted headers size.
*/
static int64_t
arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, uint64_t *real_evicted)
{
arc_state_t *evicted_state, *state;
int64_t bytes_evicted = 0;
int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
*real_evicted = 0;
state = hdr->b_l1hdr.b_state;
if (GHOST_STATE(state)) {
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
/*
* l2arc_write_buffers() relies on a header's L1 portion
* (i.e. its b_pabd field) during its write phase.
* Thus, we cannot push a header onto the arc_l2c_only
* state (removing its L1 piece) until the header is
* done being written to the l2arc.
*/
if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
ARCSTAT_BUMP(arcstat_evict_l2_skip);
return (bytes_evicted);
}
ARCSTAT_BUMP(arcstat_deleted);
bytes_evicted += HDR_GET_LSIZE(hdr);
DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (HDR_HAS_L2HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_pabd == NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* This buffer is cached on the 2nd Level ARC;
* don't destroy the header.
*/
arc_change_state(arc_l2c_only, hdr, hash_lock);
/*
* dropping from L1+L2 cached to L2-only,
* realloc to remove the L1 header.
*/
hdr = arc_hdr_realloc(hdr, hdr_full_cache,
hdr_l2only_cache);
*real_evicted += HDR_FULL_SIZE - HDR_L2ONLY_SIZE;
} else {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
*real_evicted += HDR_FULL_SIZE;
}
return (bytes_evicted);
}
ASSERT(state == arc_mru || state == arc_mfu);
evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
/* prefetch buffers have a minimum lifespan */
if (HDR_IO_IN_PROGRESS(hdr) ||
((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
MSEC_TO_TICK(min_lifetime))) {
ARCSTAT_BUMP(arcstat_evict_skip);
return (bytes_evicted);
}
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
while (hdr->b_l1hdr.b_buf) {
arc_buf_t *buf = hdr->b_l1hdr.b_buf;
if (!mutex_tryenter(&buf->b_evict_lock)) {
ARCSTAT_BUMP(arcstat_mutex_miss);
break;
}
if (buf->b_data != NULL) {
bytes_evicted += HDR_GET_LSIZE(hdr);
*real_evicted += HDR_GET_LSIZE(hdr);
}
mutex_exit(&buf->b_evict_lock);
arc_buf_destroy_impl(buf);
}
if (HDR_HAS_L2HDR(hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
} else {
if (l2arc_write_eligible(hdr->b_spa, hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_eligible,
HDR_GET_LSIZE(hdr));
switch (state->arcs_state) {
case ARC_STATE_MRU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mru,
HDR_GET_LSIZE(hdr));
break;
case ARC_STATE_MFU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mfu,
HDR_GET_LSIZE(hdr));
break;
default:
break;
}
} else {
ARCSTAT_INCR(arcstat_evict_l2_ineligible,
HDR_GET_LSIZE(hdr));
}
}
if (hdr->b_l1hdr.b_bufcnt == 0) {
arc_cksum_free(hdr);
bytes_evicted += arc_hdr_size(hdr);
*real_evicted += arc_hdr_size(hdr);
/*
* If this hdr is being evicted and has a compressed
* buffer then we discard it here before we change states.
* This ensures that the accounting is updated correctly
* in arc_free_data_impl().
*/
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
arc_change_state(evicted_state, hdr, hash_lock);
ASSERT(HDR_IN_HASH_TABLE(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
}
return (bytes_evicted);
}
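/*
 * Worked example for arc_evict_hdr() above (illustrative; uses the macros
 * referenced in the function rather than concrete byte counts): evicting a
 * ghost-state header for a 128K block credits bytes_evicted with
 * HDR_GET_LSIZE(hdr) == 128K, so eviction progress is comparable to the
 * non-ghost states, while *real_evicted only grows by the header memory
 * actually released -- HDR_FULL_SIZE when the header is destroyed, or
 * HDR_FULL_SIZE - HDR_L2ONLY_SIZE when it is reallocated as L2-only. For
 * non-ghost states both counters grow by the size of the data freed.
 */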
static void
arc_set_need_free(void)
{
ASSERT(MUTEX_HELD(&arc_evict_lock));
int64_t remaining = arc_free_memory() - arc_sys_free / 2;
arc_evict_waiter_t *aw = list_tail(&arc_evict_waiters);
if (aw == NULL) {
arc_need_free = MAX(-remaining, 0);
} else {
arc_need_free =
MAX(-remaining, (int64_t)(aw->aew_count - arc_evict_count));
}
}
static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
uint64_t spa, uint64_t bytes)
{
multilist_sublist_t *mls;
uint64_t bytes_evicted = 0, real_evicted = 0;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
int evict_count = zfs_arc_evict_batch_limit;
ASSERT3P(marker, !=, NULL);
mls = multilist_sublist_lock(ml, idx);
for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL);
hdr = multilist_sublist_prev(mls, marker)) {
if ((evict_count <= 0) || (bytes_evicted >= bytes))
break;
/*
* To keep our iteration location, move the marker
* forward. Since we're not holding hdr's hash lock, we
* must be very careful and not remove 'hdr' from the
* sublist. Otherwise, other consumers might mistake the
* 'hdr' as not being on a sublist when they call the
* multilist_link_active() function (they all rely on
* the hash lock protecting concurrent insertions and
* removals). multilist_sublist_move_forward() was
* specifically implemented to ensure this is the case
* (only 'marker' will be removed and re-inserted).
*/
multilist_sublist_move_forward(mls, marker);
/*
* The only case where the b_spa field should ever be
* zero is for the marker headers inserted by
* arc_evict_state(). It's possible for multiple threads
* to be calling arc_evict_state() concurrently (e.g.
* dsl_pool_close() and zio_inject_fault()), so we must
* skip any markers we see from these other threads.
*/
if (hdr->b_spa == 0)
continue;
/* we're only interested in evicting buffers of a certain spa */
if (spa != 0 && hdr->b_spa != spa) {
ARCSTAT_BUMP(arcstat_evict_skip);
continue;
}
hash_lock = HDR_LOCK(hdr);
/*
* We aren't calling this function from any code path
* that would already be holding a hash lock, so we're
* asserting on this assumption to be defensive in case
* this ever changes. Without this check, it would be
* possible to incorrectly increment arcstat_mutex_miss
* below (e.g. if the code changed such that we called
* this function with a hash lock held).
*/
ASSERT(!MUTEX_HELD(hash_lock));
if (mutex_tryenter(hash_lock)) {
uint64_t revicted;
uint64_t evicted = arc_evict_hdr(hdr, hash_lock,
&revicted);
mutex_exit(hash_lock);
bytes_evicted += evicted;
real_evicted += revicted;
/*
* If evicted is zero, arc_evict_hdr() must have
* decided to skip this header, so don't increment
* evict_count in this case.
*/
if (evicted != 0)
evict_count--;
} else {
ARCSTAT_BUMP(arcstat_mutex_miss);
}
}
multilist_sublist_unlock(mls);
/*
* Increment the count of evicted bytes, and wake up any threads that
* are waiting for the count to reach this value. Since the list is
* ordered by ascending aew_count, we pop off the beginning of the
* list until we reach the end, or a waiter that's past the current
* "count". Doing this outside the loop reduces the number of times
* we need to acquire the global arc_evict_lock.
*
* Only wake when there's sufficient free memory in the system
* (specifically, arc_sys_free/2, which by default is a bit more than
* 1/64th of RAM). See the comments in arc_wait_for_eviction().
*/
mutex_enter(&arc_evict_lock);
arc_evict_count += real_evicted;
if (arc_free_memory() > arc_sys_free / 2) {
arc_evict_waiter_t *aw;
while ((aw = list_head(&arc_evict_waiters)) != NULL &&
aw->aew_count <= arc_evict_count) {
list_remove(&arc_evict_waiters, aw);
cv_broadcast(&aw->aew_cv);
}
}
arc_set_need_free();
mutex_exit(&arc_evict_lock);
/*
* If the ARC size is reduced from arc_c_max to arc_c_min (especially
* if the average cached block is small), eviction can be on-CPU for
* many seconds. To ensure that other threads that may be bound to
* this CPU are able to make progress, make a voluntary preemption
* call here.
*/
cond_resched();
return (bytes_evicted);
}
/*
* Evict buffers from the given arc state, until we've removed the
* specified number of bytes. Move the removed buffers to the
* appropriate evict state.
*
* This function makes a "best effort". It skips over any buffers
* it can't get a hash_lock on, and so, may not catch all candidates.
* It may also return without evicting as much space as requested.
*
* If bytes is specified using the special value ARC_EVICT_ALL, this
* will evict all available (i.e. unlocked and evictable) buffers from
* the given arc state; which is used by arc_flush().
*/
static uint64_t
arc_evict_state(arc_state_t *state, uint64_t spa, uint64_t bytes,
arc_buf_contents_t type)
{
uint64_t total_evicted = 0;
multilist_t *ml = &state->arcs_list[type];
int num_sublists;
arc_buf_hdr_t **markers;
num_sublists = multilist_get_num_sublists(ml);
/*
* If we've tried to evict from each sublist, made some
* progress, but still have not hit the target number of bytes
* to evict, we want to keep trying. The markers allow us to
* pick up where we left off for each individual sublist, rather
* than starting from the tail each time.
*/
markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls;
markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
/*
* A b_spa of 0 is used to indicate that this header is
* a marker. This fact is used in arc_evict_type() and
* arc_evict_state_impl().
*/
markers[i]->b_spa = 0;
mls = multilist_sublist_lock(ml, i);
multilist_sublist_insert_tail(mls, markers[i]);
multilist_sublist_unlock(mls);
}
/*
* While we haven't hit our target number of bytes to evict, or
* we're evicting all available buffers.
*/
while (total_evicted < bytes) {
int sublist_idx = multilist_get_random_index(ml);
uint64_t scan_evicted = 0;
/*
* Try to reduce pinned dnodes with a floor of arc_dnode_limit.
* Request that 10% of the LRUs be scanned by the superblock
* shrinker.
*/
if (type == ARC_BUFC_DATA && aggsum_compare(
&arc_sums.arcstat_dnode_size, arc_dnode_size_limit) > 0) {
arc_prune_async((aggsum_upper_bound(
&arc_sums.arcstat_dnode_size) -
arc_dnode_size_limit) / sizeof (dnode_t) /
zfs_arc_dnode_reduce_percent);
}
/*
* Start eviction using a randomly selected sublist,
* this is to try and evenly balance eviction across all
* sublists. Always starting at the same sublist
* (e.g. index 0) would cause evictions to favor certain
* sublists over others.
*/
for (int i = 0; i < num_sublists; i++) {
uint64_t bytes_remaining;
uint64_t bytes_evicted;
if (total_evicted < bytes)
bytes_remaining = bytes - total_evicted;
else
break;
bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
markers[sublist_idx], spa, bytes_remaining);
scan_evicted += bytes_evicted;
total_evicted += bytes_evicted;
/* we've reached the end, wrap to the beginning */
if (++sublist_idx >= num_sublists)
sublist_idx = 0;
}
/*
* If we didn't evict anything during this scan, we have
* no reason to believe we'll evict more during another
* scan, so break the loop.
*/
if (scan_evicted == 0) {
/* This isn't possible, let's make that obvious */
ASSERT3S(bytes, !=, 0);
/*
* When bytes is ARC_EVICT_ALL, the only way to
* break the loop is when scan_evicted is zero.
* In that case, we actually have evicted enough,
* so we don't want to increment the kstat.
*/
if (bytes != ARC_EVICT_ALL) {
ASSERT3S(total_evicted, <, bytes);
ARCSTAT_BUMP(arcstat_evict_not_enough);
}
break;
}
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
multilist_sublist_remove(mls, markers[i]);
multilist_sublist_unlock(mls);
kmem_cache_free(hdr_full_cache, markers[i]);
}
kmem_free(markers, sizeof (*markers) * num_sublists);
return (total_evicted);
}
/*
* Flush all "evictable" data of the given type from the arc state
* specified. This will not evict any "active" buffers (i.e. referenced).
*
* When 'retry' is set to B_FALSE, the function will make a single pass
* over the state and evict any buffers that it can. Since it doesn't
* continually retry the eviction, it might end up leaving some buffers
* in the ARC due to lock misses.
*
* When 'retry' is set to B_TRUE, the function will continually retry the
* eviction until *all* evictable buffers have been removed from the
* state. As a result, if concurrent insertions into the state are
* allowed (e.g. if the ARC isn't shutting down), this function might
* wind up in an infinite loop, continually trying to evict buffers.
*/
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
boolean_t retry)
{
uint64_t evicted = 0;
while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
if (!retry)
break;
}
return (evicted);
}
/*
* Evict the specified number of bytes from the state specified,
* restricting eviction to the spa and type given. This function
* prevents us from trying to evict more from a state's list than
* is "evictable", and to skip evicting altogether when passed a
* negative value for "bytes". In contrast, arc_evict_state() will
* evict everything it can, when passed a negative value for "bytes".
*/
static uint64_t
arc_evict_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
arc_buf_contents_t type)
{
uint64_t delta;
if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
bytes);
return (arc_evict_state(state, spa, delta, type));
}
return (0);
}
/*
* The goal of this function is to evict enough meta data buffers from the
* ARC in order to enforce the arc_meta_limit. Achieving this is slightly
* more complicated than it appears because it is common for data buffers
* to have holds on meta data buffers. In addition, dnode meta data buffers
* will be held by the dnodes in the block preventing them from being freed.
* This means we can't simply traverse the ARC and expect to always find
* enough unheld meta data buffers to release.
*
* Therefore, this function has been updated to make alternating passes
* over the ARC releasing data buffers and then newly unheld meta data
* buffers. This ensures forward progress is maintained and meta_used
* will decrease. Normally this is sufficient, but if required the ARC
* will call the registered prune callbacks causing dentry and inodes to
* be dropped from the VFS cache. This will make dnode meta data buffers
* available for reclaim.
*/
static uint64_t
arc_evict_meta_balanced(uint64_t meta_used)
{
int64_t delta, prune = 0, adjustmnt;
uint64_t total_evicted = 0;
arc_buf_contents_t type = ARC_BUFC_DATA;
int restarts = MAX(zfs_arc_meta_adjust_restarts, 0);
restart:
/*
* This differs slightly from the way we evict from the mru in
* arc_evict because we don't have a "target" value (i.e. no
* "meta" arc_p). As a result, I think we can completely
* cannibalize the metadata in the MRU before we evict the
* metadata from the MFU. I think we probably need to implement a
* "metadata arc_p" value to do this properly.
*/
adjustmnt = meta_used - arc_meta_limit;
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mru->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&arc_mru->arcs_esize[type]),
adjustmnt);
total_evicted += arc_evict_impl(arc_mru, 0, delta, type);
adjustmnt -= delta;
}
/*
* We can't afford to recalculate adjustmnt here. If we do,
* new metadata buffers can sneak into the MRU or ANON lists,
* thus penalizing the MFU metadata. Although the fudge factor is
* small, it has been empirically shown to be significant for
* certain workloads (e.g. creating many empty directories). As
* such, we use the original calculation for adjustmnt, and
* simply decrement the amount of data evicted from the MRU.
*/
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&arc_mfu->arcs_esize[type]),
adjustmnt);
total_evicted += arc_evict_impl(arc_mfu, 0, delta, type);
}
adjustmnt = meta_used - arc_meta_limit;
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]));
total_evicted += arc_evict_impl(arc_mru_ghost, 0, delta, type);
adjustmnt -= delta;
}
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]));
total_evicted += arc_evict_impl(arc_mfu_ghost, 0, delta, type);
}
/*
* If after attempting to make the requested adjustment to the ARC
* the meta limit is still being exceeded then request that the
* higher layers drop some cached objects which have holds on ARC
* meta buffers. Requests to the upper layers will be made with
* increasingly large scan sizes until the ARC is below the limit.
*/
if (meta_used > arc_meta_limit) {
if (type == ARC_BUFC_DATA) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
if (zfs_arc_meta_prune) {
prune += zfs_arc_meta_prune;
arc_prune_async(prune);
}
}
if (restarts > 0) {
restarts--;
goto restart;
}
}
return (total_evicted);
}
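/*
 * Illustrative walk-through of arc_evict_meta_balanced() above (the numbers
 * are arbitrary): with meta_used = 600M and arc_meta_limit = 512M, adjustmnt
 * starts at 88M. If the MRU holds 30M of evictable buffers of the current
 * type, 30M is evicted and the remaining 58M is requested from the MFU
 * without recomputing adjustmnt. The ghost lists are then trimmed against a
 * freshly computed adjustmnt. If meta_used is still over the limit, the
 * pass alternates between ARC_BUFC_DATA and ARC_BUFC_METADATA (growing the
 * arc_prune_async() request after each metadata pass) and restarts, up to
 * zfs_arc_meta_adjust_restarts times.
 */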
/*
* Evict metadata buffers from the cache, such that arcstat_meta_used is
* capped by the arc_meta_limit tunable.
*/
static uint64_t
arc_evict_meta_only(uint64_t meta_used)
{
uint64_t total_evicted = 0;
int64_t target;
/*
* If we're over the meta limit, we want to evict enough
* metadata to get back under the meta limit. We don't want to
* evict so much that we drop the MRU below arc_p, though. If
* we're over the meta limit more than we're over arc_p, we
* evict some from the MRU here, and some from the MFU below.
*/
target = MIN((int64_t)(meta_used - arc_meta_limit),
(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) - arc_p));
total_evicted += arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
/*
* Similar to the above, we want to evict enough bytes to get us
* below the meta limit, but not so much as to drop us below the
* space allotted to the MFU (which is defined as arc_c - arc_p).
*/
target = MIN((int64_t)(meta_used - arc_meta_limit),
(int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) -
(arc_c - arc_p)));
total_evicted += arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
return (total_evicted);
}
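/*
 * Example of the target computation in arc_evict_meta_only() above (numbers
 * are illustrative): if we are 200M over arc_meta_limit but anon + mru only
 * exceed arc_p by 50M, at most 50M of metadata is evicted from the MRU; the
 * second target then takes what it can from the MFU without dropping it
 * below its allotment of arc_c - arc_p.
 */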
static uint64_t
arc_evict_meta(uint64_t meta_used)
{
if (zfs_arc_meta_strategy == ARC_STRATEGY_META_ONLY)
return (arc_evict_meta_only(meta_used));
else
return (arc_evict_meta_balanced(meta_used));
}
/*
* Return the type of the oldest buffer in the given arc state
*
* This function will select a random sublist of type ARC_BUFC_DATA and
* a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
* is compared, and the type which contains the "older" buffer will be
* returned.
*/
static arc_buf_contents_t
arc_evict_type(arc_state_t *state)
{
multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA];
multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA];
int data_idx = multilist_get_random_index(data_ml);
int meta_idx = multilist_get_random_index(meta_ml);
multilist_sublist_t *data_mls;
multilist_sublist_t *meta_mls;
arc_buf_contents_t type;
arc_buf_hdr_t *data_hdr;
arc_buf_hdr_t *meta_hdr;
/*
* We keep the sublist lock until we're finished, to prevent
* the headers from being destroyed via arc_evict_state().
*/
data_mls = multilist_sublist_lock(data_ml, data_idx);
meta_mls = multilist_sublist_lock(meta_ml, meta_idx);
/*
* These two loops are to ensure we skip any markers that
* might be at the tail of the lists due to arc_evict_state().
*/
for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
if (data_hdr->b_spa != 0)
break;
}
for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
if (meta_hdr->b_spa != 0)
break;
}
if (data_hdr == NULL && meta_hdr == NULL) {
type = ARC_BUFC_DATA;
} else if (data_hdr == NULL) {
ASSERT3P(meta_hdr, !=, NULL);
type = ARC_BUFC_METADATA;
} else if (meta_hdr == NULL) {
ASSERT3P(data_hdr, !=, NULL);
type = ARC_BUFC_DATA;
} else {
ASSERT3P(data_hdr, !=, NULL);
ASSERT3P(meta_hdr, !=, NULL);
/* The headers can't be on the sublist without an L1 header */
ASSERT(HDR_HAS_L1HDR(data_hdr));
ASSERT(HDR_HAS_L1HDR(meta_hdr));
if (data_hdr->b_l1hdr.b_arc_access <
meta_hdr->b_l1hdr.b_arc_access) {
type = ARC_BUFC_DATA;
} else {
type = ARC_BUFC_METADATA;
}
}
multilist_sublist_unlock(meta_mls);
multilist_sublist_unlock(data_mls);
return (type);
}
/*
* Evict buffers from the cache, such that arcstat_size is capped by arc_c.
*/
static uint64_t
arc_evict(void)
{
uint64_t total_evicted = 0;
uint64_t bytes;
int64_t target;
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
uint64_t ameta = aggsum_value(&arc_sums.arcstat_meta_used);
/*
* If we're over arc_meta_limit, we want to correct that before
* potentially evicting data buffers below.
*/
total_evicted += arc_evict_meta(ameta);
/*
* Adjust MRU size
*
* If we're over the target cache size, we want to evict enough
* from the list to get back to our target size. We don't want
* to evict too much from the MRU, such that it drops below
* arc_p. So, if we're over our target cache size more than
* the MRU is over arc_p, we'll evict enough to get back to
* arc_p here, and then evict more from the MFU below.
*/
target = MIN((int64_t)(asize - arc_c),
(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
/*
* If we're below arc_meta_min, always prefer to evict data.
* Otherwise, try to satisfy the requested number of bytes to
* evict from the type which contains older buffers, in an
* effort to keep newer buffers in the cache regardless of their
* type. If we cannot satisfy the number of bytes from this
* type, spill over into the next type.
*/
if (arc_evict_type(arc_mru) == ARC_BUFC_METADATA &&
ameta > arc_meta_min) {
bytes = arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru, 0, target, ARC_BUFC_DATA);
} else {
bytes = arc_evict_impl(arc_mru, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
}
/*
* Re-sum ARC stats after the first round of evictions.
*/
asize = aggsum_value(&arc_sums.arcstat_size);
ameta = aggsum_value(&arc_sums.arcstat_meta_used);
/*
* Adjust MFU size
*
* Now that we've tried to evict enough from the MRU to get its
* size back to arc_p, if we're still above the target cache
* size, we evict the rest from the MFU.
*/
target = asize - arc_c;
if (arc_evict_type(arc_mfu) == ARC_BUFC_METADATA &&
ameta > arc_meta_min) {
bytes = arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
} else {
bytes = arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
}
/*
* Adjust ghost lists
*
* In addition to the above, the ARC also defines target values
* for the ghost lists. The sum of the mru list and mru ghost
* list should never exceed the target size of the cache, and
* the sum of the mru list, mfu list, mru ghost list, and mfu
* ghost list should never exceed twice the target size of the
* cache. The following logic enforces these limits on the ghost
* caches, and evicts from them as needed.
*/
target = zfs_refcount_count(&arc_mru->arcs_size) +
zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
bytes = arc_evict_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);
/*
* We assume the sum of the mru list and mfu list is less than
* or equal to arc_c (we enforced this above), which means we
* can use the simpler of the two equations below:
*
* mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
* mru ghost + mfu ghost <= arc_c
*/
target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
bytes = arc_evict_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);
return (total_evicted);
}
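/*
 * Example of the ghost-list targets enforced at the end of arc_evict()
 * (illustrative numbers): with arc_c = 8G, mru = 5G and mru_ghost = 4G, the
 * first target is 5G + 4G - 8G = 1G, evicted from mru_ghost (data first,
 * metadata for any remainder). If that leaves mru_ghost = 3G and
 * mfu_ghost = 6G, the second target is 3G + 6G - 8G = 1G, evicted from
 * mfu_ghost, keeping mru_ghost + mfu_ghost <= arc_c.
 */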
void
arc_flush(spa_t *spa, boolean_t retry)
{
uint64_t guid = 0;
/*
* If retry is B_TRUE, a spa must not be specified since we have
* no good way to determine if all of a spa's buffers have been
* evicted from an arc state.
*/
ASSERT(!retry || spa == 0);
if (spa != NULL)
guid = spa_load_guid(spa);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
}
void
arc_reduce_target_size(int64_t to_free)
{
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
/*
* All callers want the ARC to actually evict (at least) this much
* memory. Therefore we reduce from the lower of the current size and
* the target size. This way, even if arc_c is much higher than
* arc_size (as can be the case after many calls to arc_freed()), we will
* immediately have arc_c < arc_size and therefore the arc_evict_zthr
* will evict.
*/
uint64_t c = MIN(arc_c, asize);
if (c > to_free && c - to_free > arc_c_min) {
arc_c = c - to_free;
atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
if (arc_p > arc_c)
arc_p = (arc_c >> 1);
ASSERT(arc_c >= arc_c_min);
ASSERT((int64_t)arc_p >= 0);
} else {
arc_c = arc_c_min;
}
if (asize > arc_c) {
/* See comment in arc_evict_cb_check() on why lock+flag */
mutex_enter(&arc_evict_lock);
arc_evict_needed = B_TRUE;
mutex_exit(&arc_evict_lock);
zthr_wakeup(arc_evict_zthr);
}
}
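/*
 * Example for arc_reduce_target_size() above (illustrative numbers,
 * assuming the default arc_shrink_shift of 7): with arc_c = 10G,
 * arc_size = 8G and to_free = 1G, c = MIN(10G, 8G) = 8G, so arc_c becomes
 * 7G (provided that stays above arc_c_min) and arc_p is trimmed by
 * arc_p >> 7. Since arc_size (8G) is now above arc_c (7G), the eviction
 * thread is woken.
 */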
/*
* Determine if the system is under memory pressure and is asking
* to reclaim memory. A return value of B_TRUE indicates that the system
* is under memory pressure and that the arc should adjust accordingly.
*/
boolean_t
arc_reclaim_needed(void)
{
return (arc_available_memory() < 0);
}
void
arc_kmem_reap_soon(void)
{
size_t i;
kmem_cache_t *prev_cache = NULL;
kmem_cache_t *prev_data_cache = NULL;
extern kmem_cache_t *zio_buf_cache[];
extern kmem_cache_t *zio_data_buf_cache[];
#ifdef _KERNEL
if ((aggsum_compare(&arc_sums.arcstat_meta_used,
arc_meta_limit) >= 0) && zfs_arc_meta_prune) {
/*
* We are exceeding our meta-data cache limit.
* Prune some entries to release holds on meta-data.
*/
arc_prune_async(zfs_arc_meta_prune);
}
#if defined(_ILP32)
/*
* Reclaim unused memory from all kmem caches.
*/
kmem_reap();
#endif
#endif
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
#if defined(_ILP32)
/* reach upper limit of cache size on 32-bit */
if (zio_buf_cache[i] == NULL)
break;
#endif
if (zio_buf_cache[i] != prev_cache) {
prev_cache = zio_buf_cache[i];
kmem_cache_reap_now(zio_buf_cache[i]);
}
if (zio_data_buf_cache[i] != prev_data_cache) {
prev_data_cache = zio_data_buf_cache[i];
kmem_cache_reap_now(zio_data_buf_cache[i]);
}
}
kmem_cache_reap_now(buf_cache);
kmem_cache_reap_now(hdr_full_cache);
kmem_cache_reap_now(hdr_l2only_cache);
kmem_cache_reap_now(zfs_btree_leaf_cache);
abd_cache_reap_now();
}
/* ARGSUSED */
static boolean_t
arc_evict_cb_check(void *arg, zthr_t *zthr)
{
#ifdef ZFS_DEBUG
/*
* This is necessary in order to keep the kstat information
* up to date for tools that display kstat data such as the
* mdb ::arc dcmd and the Linux crash utility. These tools
* typically do not call kstat's update function, but simply
* dump out stats from the most recent update. Without
* this call, these commands may show stale stats for the
* anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
* with this call, the data might be out of date if the
* evict thread hasn't been woken recently; but that should
* suffice. The arc_state_t structures can be queried
* directly if more accurate information is needed.
*/
if (arc_ksp != NULL)
arc_ksp->ks_update(arc_ksp, KSTAT_READ);
#endif
/*
* We have to rely on arc_wait_for_eviction() to tell us when to
* evict, rather than checking if we are overflowing here, so that we
* are sure to not leave arc_wait_for_eviction() waiting on aew_cv.
* If we have become "not overflowing" since arc_wait_for_eviction()
* checked, we need to wake it up. We could broadcast the CV here,
* but arc_wait_for_eviction() may have not yet gone to sleep. We
* would need to use a mutex to ensure that this function doesn't
* broadcast until arc_wait_for_eviction() has gone to sleep (e.g.
* the arc_evict_lock). However, the lock ordering of such a lock
* would necessarily be incorrect with respect to the zthr_lock,
* which is held before this function is called, and is held by
* arc_wait_for_eviction() when it calls zthr_wakeup().
*/
return (arc_evict_needed);
}
/*
* Keep arc_size under arc_c by running arc_evict which evicts data
* from the ARC.
*/
/* ARGSUSED */
static void
arc_evict_cb(void *arg, zthr_t *zthr)
{
uint64_t evicted = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
/* Evict from cache */
evicted = arc_evict();
/*
* If evicted is zero, we couldn't evict anything
* via arc_evict(). This could be due to hash lock
* collisions, but more likely due to the majority of
* arc buffers being unevictable. Therefore, even if
* arc_size is above arc_c, another pass is unlikely to
* be helpful and could potentially cause us to enter an
* infinite loop. Additionally, zthr_iscancelled() is
* checked here so that if the arc is shutting down, the
* broadcast will wake any remaining arc evict waiters.
*/
mutex_enter(&arc_evict_lock);
arc_evict_needed = !zthr_iscancelled(arc_evict_zthr) &&
evicted > 0 && aggsum_compare(&arc_sums.arcstat_size, arc_c) > 0;
if (!arc_evict_needed) {
/*
* We're either no longer overflowing, or we
* can't evict anything more, so we should wake
* arc_get_data_impl() sooner.
*/
arc_evict_waiter_t *aw;
while ((aw = list_remove_head(&arc_evict_waiters)) != NULL) {
cv_broadcast(&aw->aew_cv);
}
arc_set_need_free();
}
mutex_exit(&arc_evict_lock);
spl_fstrans_unmark(cookie);
}
/* ARGSUSED */
static boolean_t
arc_reap_cb_check(void *arg, zthr_t *zthr)
{
int64_t free_memory = arc_available_memory();
static int reap_cb_check_counter = 0;
/*
* If a kmem reap is already active, don't schedule more. We must
* check for this because kmem_cache_reap_soon() won't actually
* block on the cache being reaped (this is to prevent callers from
* becoming implicitly blocked by a system-wide kmem reap -- which,
* on a system with many, many full magazines, can take minutes).
*/
if (!kmem_cache_reap_active() && free_memory < 0) {
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
/*
* Wait at least zfs_grow_retry (default 5) seconds
* before considering growing.
*/
arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
return (B_TRUE);
} else if (free_memory < arc_c >> arc_no_grow_shift) {
arc_no_grow = B_TRUE;
} else if (gethrtime() >= arc_growtime) {
arc_no_grow = B_FALSE;
}
/*
* Called unconditionally every 60 seconds to reclaim unused
* zstd compression and decompression context. This is done
* here to avoid the need for an independent thread.
*/
if (!((reap_cb_check_counter++) % 60))
zfs_zstd_cache_reap_now();
return (B_FALSE);
}
/*
* Keep enough free memory in the system by reaping the ARC's kmem
* caches. To cause more slabs to be reapable, we may reduce the
* target size of the cache (arc_c), causing the arc_evict_cb()
* to free more buffers.
*/
/* ARGSUSED */
static void
arc_reap_cb(void *arg, zthr_t *zthr)
{
int64_t free_memory;
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* Kick off asynchronous kmem_reap()'s of all our caches.
*/
arc_kmem_reap_soon();
/*
* Wait at least arc_kmem_cache_reap_retry_ms between
* arc_kmem_reap_soon() calls. Without this check it is possible to
* end up in a situation where we spend lots of time reaping
* caches, while we're near arc_c_min. Waiting here also gives the
* subsequent free memory check a chance of finding that the
* asynchronous reap has already freed enough memory, and we don't
* need to call arc_reduce_target_size().
*/
delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000);
/*
* Reduce the target size as needed to maintain the amount of free
* memory in the system at a fraction of the arc_size (1/128th by
* default). If oversubscribed (free_memory < 0) then reduce the
* target arc_size by the deficit amount plus the fractional
* amount. If free memory is positive but less than the fractional
* amount, reduce by what is needed to hit the fractional amount.
*/
free_memory = arc_available_memory();
int64_t to_free =
(arc_c >> arc_shrink_shift) - free_memory;
if (to_free > 0) {
arc_reduce_target_size(to_free);
}
spl_fstrans_unmark(cookie);
}
#ifdef _KERNEL
/*
* Determine the amount of memory eligible for eviction contained in the
* ARC. All clean data reported by the ghost lists can always be safely
* evicted. Due to arc_c_min, the same does not hold for all clean data
* contained by the regular mru and mfu lists.
*
* In the case of the regular mru and mfu lists, we need to report as
* much clean data as possible, such that evicting that same reported
* data will not bring arc_size below arc_c_min. Thus, in certain
* circumstances, the total amount of clean data in the mru and mfu
* lists might not actually be evictable.
*
* The following two distinct cases are accounted for:
*
* 1. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is greater than or equal to arc_c_min.
* (i.e. amount of dirty data >= arc_c_min)
*
* This is the easy case; all clean data contained by the mru and mfu
* lists is evictable. Evicting all clean data can only drop arc_size
* to the amount of dirty data, which is greater than arc_c_min.
*
* 2. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is less than arc_c_min.
* (i.e. arc_c_min > amount of dirty data)
*
* 2.1. arc_size is greater than or equal arc_c_min.
* (i.e. arc_size >= arc_c_min > amount of dirty data)
*
* In this case, not all clean data from the regular mru and mfu
* lists is actually evictable; we must leave enough clean data
* to keep arc_size above arc_c_min. Thus, the maximum amount of
* evictable data from the two lists combined, is exactly the
* difference between arc_size and arc_c_min.
*
* 2.2. arc_size is less than arc_c_min
* (i.e. arc_c_min > arc_size > amount of dirty data)
*
* In this case, none of the data contained in the mru and mfu
* lists is evictable, even if it's clean. Since arc_size is
* already below arc_c_min, evicting any more would only
* increase this negative difference.
*/
#endif /* _KERNEL */
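/*
 * Concrete illustration of the cases above (numbers are arbitrary): with
 * arc_c_min = 4G and 1G of dirty data we are in case 2. If arc_size = 6G
 * with 5G clean, only arc_size - arc_c_min = 2G may be reported evictable
 * (case 2.1); if arc_size = 3G, nothing from the mru and mfu lists is
 * reported evictable (case 2.2). With 5G of dirty data (case 1), every
 * clean byte on those lists is evictable.
 */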
/*
* Adapt arc info given the number of bytes we are trying to add and
* the state that we are coming from. This function is only called
* when we are adding new content to the cache.
*/
static void
arc_adapt(int bytes, arc_state_t *state)
{
int mult;
uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);
ASSERT(bytes > 0);
/*
* Adapt the target size of the MRU list:
* - if we just hit in the MRU ghost list, then increase
* the target size of the MRU list.
* - if we just hit in the MFU ghost list, then increase
* the target size of the MFU list by decreasing the
* target size of the MRU list.
*/
if (state == arc_mru_ghost) {
mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
if (!zfs_arc_p_dampener_disable)
mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
} else if (state == arc_mfu_ghost) {
uint64_t delta;
mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
if (!zfs_arc_p_dampener_disable)
mult = MIN(mult, 10);
delta = MIN(bytes * mult, arc_p);
arc_p = MAX(arc_p_min, arc_p - delta);
}
ASSERT((int64_t)arc_p >= 0);
/*
* Wake reap thread if we do not have any available memory
*/
if (arc_reclaim_needed()) {
zthr_wakeup(arc_reap_zthr);
return;
}
if (arc_no_grow)
return;
if (arc_c >= arc_c_max)
return;
/*
* If we're within (2 * maxblocksize) bytes of the target
* cache size, increment the target cache size
*/
ASSERT3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT);
if (aggsum_upper_bound(&arc_sums.arcstat_size) >=
arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
atomic_add_64(&arc_c, (int64_t)bytes);
if (arc_c > arc_c_max)
arc_c = arc_c_max;
else if (state == arc_anon)
atomic_add_64(&arc_p, (int64_t)bytes);
if (arc_p > arc_c)
arc_p = arc_c;
}
ASSERT((int64_t)arc_p >= 0);
}
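/*
 * Example of the ghost-hit adaptation in arc_adapt() above (illustrative
 * numbers): on a hit in arc_mru_ghost with mrug_size = 1G and
 * mfug_size = 4G, mult = 4 (capped at 10 unless
 * zfs_arc_p_dampener_disable is set), so arc_p grows by 4 * bytes, bounded
 * by arc_c - arc_p_min. A hit in arc_mfu_ghost shrinks arc_p symmetrically,
 * never below arc_p_min.
 */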
/*
* Check if arc_size has grown past our upper threshold, determined by
* zfs_arc_overflow_shift.
*/
static arc_ovf_level_t
arc_is_overflowing(void)
{
/* Always allow at least one block of overflow */
int64_t overflow = MAX(SPA_MAXBLOCKSIZE,
arc_c >> zfs_arc_overflow_shift);
/*
* We just compare the lower bound here for performance reasons. Our
* primary goals are to make sure that the arc never grows without
* bound, and that it can reach its maximum size. This check
* accomplishes both goals. The maximum amount we could run over by is
* 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block
* in the ARC. In practice, that's in the tens of MB, which is low
* enough to be safe.
*/
int64_t over = aggsum_lower_bound(&arc_sums.arcstat_size) -
arc_c - overflow / 2;
return (over < 0 ? ARC_OVF_NONE :
over < overflow ? ARC_OVF_SOME : ARC_OVF_SEVERE);
}
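/*
 * Worked example of the thresholds above, assuming the default
 * zfs_arc_overflow_shift of 8 and arc_c = 4G: arc_c >> 8 = 16M, which
 * equals SPA_MAXBLOCKSIZE, so overflow = 16M. The lower bound of arc_size
 * is then compared against arc_c + 8M: below that we return ARC_OVF_NONE,
 * between arc_c + 8M and arc_c + 24M ARC_OVF_SOME, and beyond that
 * ARC_OVF_SEVERE.
 */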
static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
boolean_t do_adapt)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, do_adapt);
if (type == ARC_BUFC_METADATA) {
return (abd_alloc(size, B_TRUE));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (abd_alloc(size, B_FALSE));
}
}
static void *
arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, B_TRUE);
if (type == ARC_BUFC_METADATA) {
return (zio_buf_alloc(size));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (zio_data_buf_alloc(size));
}
}
/*
* Wait for the specified amount of data (in bytes) to be evicted from the
* ARC, and for there to be sufficient free memory in the system. Waiting for
* eviction ensures that the memory used by the ARC decreases. Waiting for
* free memory ensures that the system won't run out of free pages, regardless
* of ARC behavior and settings. See arc_lowmem_init().
*/
void
arc_wait_for_eviction(uint64_t amount)
{
switch (arc_is_overflowing()) {
case ARC_OVF_NONE:
return;
case ARC_OVF_SOME:
/*
* This is a bit racy without taking arc_evict_lock, but the
* worst that can happen is we either call zthr_wakeup() an
* extra time due to a race with another thread here, or the
* flag we set gets cleared by arc_evict_cb(), which is unlikely
* given the large hysteresis, and also not important since at
* this level of overflow the eviction is purely advisory. At
* the same time, taking the global lock here on every call
* without waiting for the actual eviction would create
* significant lock contention.
*/
if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
return;
case ARC_OVF_SEVERE:
default:
{
arc_evict_waiter_t aw;
list_link_init(&aw.aew_node);
cv_init(&aw.aew_cv, NULL, CV_DEFAULT, NULL);
uint64_t last_count = 0;
mutex_enter(&arc_evict_lock);
if (!list_is_empty(&arc_evict_waiters)) {
arc_evict_waiter_t *last =
list_tail(&arc_evict_waiters);
last_count = last->aew_count;
} else if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
/*
* Note, the last waiter's count may be less than
* arc_evict_count if we are low on memory in which
* case arc_evict_state_impl() may have deferred
* wakeups (but still incremented arc_evict_count).
*/
aw.aew_count = MAX(last_count, arc_evict_count) + amount;
list_insert_tail(&arc_evict_waiters, &aw);
arc_set_need_free();
DTRACE_PROBE3(arc__wait__for__eviction,
uint64_t, amount,
uint64_t, arc_evict_count,
uint64_t, aw.aew_count);
/*
* We will be woken up either when arc_evict_count reaches
* aew_count, or when the ARC is no longer overflowing and
* eviction completes.
* In case of "false" wakeup, we will still be on the list.
*/
do {
cv_wait(&aw.aew_cv, &arc_evict_lock);
} while (list_link_active(&aw.aew_node));
mutex_exit(&arc_evict_lock);
cv_destroy(&aw.aew_cv);
}
}
}
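/*
 * Example of the waiter accounting in the ARC_OVF_SEVERE path above
 * (illustrative numbers): with arc_evict_count = 1000, a queued waiter
 * whose aew_count is 1500, and amount = 256, the new waiter is given
 * aew_count = MAX(1500, 1000) + 256 = 1756 and appended to the list, which
 * therefore stays ordered by ascending aew_count. It is woken once
 * arc_evict_count reaches 1756, or earlier if eviction completes and the
 * ARC is no longer overflowing.
 */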
/*
* Allocate a block and return it to the caller. If we are hitting the
* hard limit for the cache size, we must sleep, waiting for the eviction
* thread to catch up. If we're past the target size but below the hard
* limit, we'll only signal the reclaim thread and continue on.
*/
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
boolean_t do_adapt)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
if (do_adapt)
arc_adapt(size, state);
/*
* If arc_size is currently overflowing, we must be adding data
* faster than we are evicting. To ensure we don't compound the
* problem by adding more data and forcing arc_size to grow even
* further past its target size, we wait for the eviction thread to
* make some progress. We also wait for there to be sufficient free
* memory in the system, as measured by arc_free_memory().
*
* Specifically, we wait for zfs_arc_eviction_pct percent of the
* requested size to be evicted. This should be more than 100%, to
* ensure that that progress is also made towards getting arc_size
* under arc_c. See the comment above zfs_arc_eviction_pct.
*/
arc_wait_for_eviction(size * zfs_arc_eviction_pct / 100);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_consume(size, ARC_SPACE_META);
} else {
arc_space_consume(size, ARC_SPACE_DATA);
}
/*
* Update the state size. Note that ghost states have a
* "ghost size" and so don't need to be updated.
*/
if (!GHOST_STATE(state)) {
(void) zfs_refcount_add_many(&state->arcs_size, size, tag);
/*
* If this is reached via arc_read, the link is
* protected by the hash lock. If reached via
* arc_buf_alloc, the header should not be accessed by
* any other thread. And, if reached via arc_read_done,
* the hash lock will protect it if it's found in the
* hash table; otherwise no other thread should be
* trying to [add|remove]_reference it.
*/
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
size, tag);
}
/*
* If we are growing the cache, and we are adding anonymous
* data, and we have outgrown arc_p, update arc_p
*/
if (aggsum_upper_bound(&arc_sums.arcstat_size) < arc_c &&
hdr->b_l1hdr.b_state == arc_anon &&
(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
arc_p = MIN(arc_c, arc_p + size);
}
}
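/*
 * Example of the eviction throttle applied above, assuming the default
 * zfs_arc_eviction_pct of 200: allocating a 128K buffer while the ARC is
 * overflowing waits for 128K * 200 / 100 = 256K to be evicted, so each
 * allocation also makes headway on pulling arc_size back under arc_c.
 */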
static void
arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag)
{
arc_free_data_impl(hdr, size, tag);
abd_free(abd);
}
static void
arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_free_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
zio_buf_free(buf, size);
} else {
ASSERT(type == ARC_BUFC_DATA);
zio_data_buf_free(buf, size);
}
}
/*
* Free the arc data buffer.
*/
static void
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, tag);
}
(void) zfs_refcount_remove_many(&state->arcs_size, size, tag);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
}
/*
* This routine is called whenever a buffer is accessed.
* NOTE: the hash lock is dropped in this function.
*/
static void
arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
clock_t now;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
if (hdr->b_l1hdr.b_state == arc_anon) {
/*
* This buffer is not in the cache, and does not
* appear in our "ghost" list. Add the new buffer
* to the MRU state.
*/
ASSERT0(hdr->b_l1hdr.b_arc_access);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mru, hdr, hash_lock);
} else if (hdr->b_l1hdr.b_state == arc_mru) {
now = ddi_get_lbolt();
/*
* If this buffer is here because of a prefetch, then either:
* - clear the flag if this is a "referencing" read
* (any subsequent access will bump this into the MFU state).
* or
* - move the buffer to the head of the list if this is
* another prefetch (to make it less likely to be evicted).
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
/* link protected by hash lock */
ASSERT(multilist_link_active(
&hdr->b_l1hdr.b_arc_node));
} else {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
atomic_inc_32(&hdr->b_l1hdr.b_mru_hits);
ARCSTAT_BUMP(arcstat_mru_hits);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
hdr->b_l1hdr.b_arc_access = now;
return;
}
/*
* This buffer has been "accessed" only once so far,
* but it is still in the cache. Move it to the MFU
* state.
*/
if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access +
ARC_MINTIME)) {
/*
* More than 125ms have passed since we
* instantiated this buffer. Move it to the
* most frequently used state.
*/
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr, hash_lock);
}
atomic_inc_32(&hdr->b_l1hdr.b_mru_hits);
ARCSTAT_BUMP(arcstat_mru_hits);
} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
arc_state_t *new_state;
/*
* This buffer has been "accessed" recently, but
* was evicted from the cache. Move it to the
* MFU state.
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
new_state = arc_mru;
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
} else {
new_state = arc_mfu;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
}
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
arc_change_state(new_state, hdr, hash_lock);
atomic_inc_32(&hdr->b_l1hdr.b_mru_ghost_hits);
ARCSTAT_BUMP(arcstat_mru_ghost_hits);
} else if (hdr->b_l1hdr.b_state == arc_mfu) {
/*
* This buffer has been accessed more than once and is
* still in the cache. Keep it in the MFU state.
*
* NOTE: an add_reference() that occurred when we did
* the arc_read() will have kicked this off the list.
* If it was a prefetch, we will explicitly move it to
* the head of the list now.
*/
atomic_inc_32(&hdr->b_l1hdr.b_mfu_hits);
ARCSTAT_BUMP(arcstat_mfu_hits);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
arc_state_t *new_state = arc_mfu;
/*
* This buffer has been accessed more than once but has
* been evicted from the cache. Move it back to the
* MFU state.
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
/*
* This is a prefetch access...
* move this block back to the MRU state.
*/
new_state = arc_mru;
}
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(new_state, hdr, hash_lock);
atomic_inc_32(&hdr->b_l1hdr.b_mfu_ghost_hits);
ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
/*
* This buffer is on the 2nd Level ARC.
*/
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr, hash_lock);
} else {
cmn_err(CE_PANIC, "invalid arc state 0x%p",
hdr->b_l1hdr.b_state);
}
}
/*
* This routine is called by dbuf_hold() to update the arc_access() state
* which otherwise would be skipped for entries in the dbuf cache.
*/
void
arc_buf_access(arc_buf_t *buf)
{
mutex_enter(&buf->b_evict_lock);
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Avoid taking the hash_lock when possible as an optimization.
* The header must be checked again under the hash_lock in order
* to handle the case where it is concurrently being released.
*/
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(&buf->b_evict_lock);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(hash_lock);
mutex_exit(&buf->b_evict_lock);
ARCSTAT_BUMP(arcstat_access_skip);
return;
}
mutex_exit(&buf->b_evict_lock);
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr) && !HDR_PRESCIENT_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
}
/* A generic arc_read_done_func_t that copies the block's data into the caller's buffer. */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
if (buf == NULL)
return;
bcopy(buf->b_data, arg, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
/* A generic arc_read_done_func_t that hands the arc_buf_t back to the caller. */
/* ARGSUSED */
void
arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
arc_buf_t **bufp = arg;
if (buf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
*bufp = NULL;
} else {
ASSERT(zio == NULL || zio->io_error == 0);
*bufp = buf;
ASSERT(buf->b_data != NULL);
}
}
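/*
 * Both helpers tolerate a NULL buf, which arc_read_done() passes when the
 * read failed; arc_getbuf_func() reports this by leaving *bufp NULL, so
 * callers are expected to check the returned buffer as well as the error.
 */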
static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else {
if (HDR_COMPRESSION_ENABLED(hdr)) {
ASSERT3U(arc_hdr_get_compress(hdr), ==,
BP_GET_COMPRESS(bp));
}
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp));
}
}
static void
arc_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
arc_buf_hdr_t *hdr = zio->io_private;
kmutex_t *hash_lock = NULL;
arc_callback_t *callback_list;
arc_callback_t *acb;
boolean_t freeable = B_FALSE;
/*
* The hdr was inserted into the hash table and removed from lists
* prior to starting I/O. We should find this header, since
* it's in the hash table, and it should be valid since it
* cannot be evicted during the I/O. The only possible
* reason for it not to be found is if it was freed during the
* read.
*/
if (HDR_IN_HASH_TABLE(hdr)) {
arc_buf_hdr_t *found;
ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
ASSERT3U(hdr->b_dva.dva_word[0], ==,
BP_IDENTITY(zio->io_bp)->dva_word[0]);
ASSERT3U(hdr->b_dva.dva_word[1], ==,
BP_IDENTITY(zio->io_bp)->dva_word[1]);
found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
ASSERT((found == hdr &&
DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
(found == hdr && HDR_L2_READING(hdr)));
ASSERT3P(hash_lock, !=, NULL);
}
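/*
 * For protected blocks, stash the crypto parameters in the hdr so the data
 * can be decrypted/authenticated later (e.g. by arc_buf_fill()). ZIL blocks
 * carry their MAC in the zil_chain_t header rather than in the bp, hence
 * the temporary ABD borrow below.
 */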
if (BP_IS_PROTECTED(bp)) {
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
void *tmpbuf;
tmpbuf = abd_borrow_buf_copy(zio->io_abd,
sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmpbuf,
hdr->b_crypt_hdr.b_mac);
abd_return_buf(zio->io_abd, tmpbuf,
sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
}
}
if (zio->io_error == 0) {
/* byteswap if necessary */
if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
if (BP_GET_LEVEL(zio->io_bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
if (!HDR_L2_READING(hdr)) {
hdr->b_complevel = zio->io_prop.zp_complevel;
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
if (l2arc_noprefetch && HDR_PREFETCH(hdr))
arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
callback_list = hdr->b_l1hdr.b_acb;
ASSERT3P(callback_list, !=, NULL);
if (hash_lock && zio->io_error == 0 &&
hdr->b_l1hdr.b_state == arc_anon) {
/*
* Only call arc_access on anonymous buffers. This is because
* if we've issued an I/O for an evicted buffer, we've already
* called arc_access (to prevent any simultaneous readers from
* getting confused).
*/
arc_access(hdr, hash_lock);
}
/*
* If a read request has a callback (i.e. acb_done is not NULL), then we
* make a buf containing the data according to the parameters which were
* passed in. The implementation of arc_buf_alloc_impl() ensures that we
* aren't needlessly decompressing the data multiple times.
*/
int callback_cnt = 0;
for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
if (!acb->acb_done || acb->acb_nobuf)
continue;
callback_cnt++;
if (zio->io_error != 0)
continue;
int error = arc_buf_alloc_impl(hdr, zio->io_spa,
&acb->acb_zb, acb->acb_private, acb->acb_encrypted,
acb->acb_compressed, acb->acb_noauth, B_TRUE,
&acb->acb_buf);
/*
* Assert non-speculative zios didn't fail because an
* encryption key wasn't loaded
*/
ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
error != EACCES);
/*
* If we failed to decrypt, report an error now (as the zio
* layer would have done if it had done the transforms).
*/
if (error == ECKSUM) {
ASSERT(BP_IS_PROTECTED(bp));
error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(zio->io_spa, &acb->acb_zb);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
zio->io_spa, NULL, &acb->acb_zb, zio, 0);
}
}
if (error != 0) {
/*
* Decompression or decryption failed. Set
* io_error so that when we call acb_done
* (below), we will indicate that the read
* failed. Note that in the unusual case
* where one callback is compressed and another
* uncompressed, we will mark all of them
* as failed, even though the uncompressed
* one can't actually fail. In this case,
* the hdr will not be anonymous, because
* if there are multiple callbacks, it's
* because multiple threads found the same
* arc buf in the hash table.
*/
zio->io_error = error;
}
}
/*
* If there are multiple callbacks, we must have the hash lock,
* because the only way for multiple threads to find this hdr is
* in the hash table. This ensures that if there are multiple
* callbacks, the hdr is not anonymous. If it were anonymous,
* we couldn't use arc_buf_destroy() in the error case below.
*/
ASSERT(callback_cnt < 2 || hash_lock != NULL);
hdr->b_l1hdr.b_acb = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (callback_cnt == 0)
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
callback_list != NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hdr->b_l1hdr.b_state != arc_anon)
arc_change_state(arc_anon, hdr, hash_lock);
if (HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/*
* Broadcast before we drop the hash_lock to avoid the possibility
* that the hdr (and hence the cv) might be freed before we get to
* the cv_broadcast().
*/
cv_broadcast(&hdr->b_l1hdr.b_cv);
if (hash_lock != NULL) {
mutex_exit(hash_lock);
} else {
/*
* This block was freed while we waited for the read to
* complete. It has been removed from the hash table and
* moved to the anonymous state (so that it won't show up
* in the cache).
*/
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/* execute each callback and free its structure */
while ((acb = callback_list) != NULL) {
if (acb->acb_done != NULL) {
if (zio->io_error != 0 && acb->acb_buf != NULL) {
/*
* If arc_buf_alloc_impl() fails during
* decompression, the buf will still be
* allocated, and needs to be freed here.
*/
arc_buf_destroy(acb->acb_buf,
acb->acb_private);
acb->acb_buf = NULL;
}
acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
acb->acb_buf, acb->acb_private);
}
if (acb->acb_zio_dummy != NULL) {
acb->acb_zio_dummy->io_error = zio->io_error;
zio_nowait(acb->acb_zio_dummy);
}
callback_list = acb->acb_next;
kmem_free(acb, sizeof (arc_callback_t));
}
if (freeable)
arc_hdr_destroy(hdr);
}
/*
* "Read" the block at the specified DVA (in bp) via the
* cache. If the block is found in the cache, invoke the provided
* callback immediately and return. Note that the `zio' parameter
* in the callback will be NULL in this case, since no IO was
* required. If the block is not in the cache, pass the read request
* on to the spa with a substitute callback function, so that the
* requested block will be added to the cache.
*
* If a read request arrives for a block that has a read in-progress,
* either wait for the in-progress read to complete (and return the
* results); or, if this is a read with a "done" func, add a record
* to the read to invoke the "done" func when the read completes,
* and return; or just return.
*
* arc_read_done() will invoke all the requested "done" functions
* for readers of this block.
*/
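/*
 * A hedged usage sketch (not taken verbatim from any caller): a simple
 * synchronous read that just wants the arc_buf_t might look like
 *
 *	arc_buf_t *abuf = NULL;
 *	arc_flags_t aflags = ARC_FLAG_WAIT;
 *	int err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *
 * On success *abuf holds the data and must eventually be freed with
 * arc_buf_destroy().
 */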
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
arc_read_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = NULL;
kmutex_t *hash_lock = NULL;
zio_t *rzio;
uint64_t guid = spa_load_guid(spa);
boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0;
boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t embedded_bp = !!BP_IS_EMBEDDED(bp);
boolean_t no_buf = *arc_flags & ARC_FLAG_NO_BUF;
int rc = 0;
ASSERT(!embedded_bp ||
BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_REDACTED(bp));
/*
* Normally SPL_FSTRANS will already be set since kernel threads which
* expect to call the DMU interfaces will set it when created. System
* calls are similarly handled by setting/cleaning the bit in the
* registered callback (module/os/.../zfs/zpl_*).
*
* External consumers such as Lustre which call the exported DMU
* interfaces may not have set SPL_FSTRANS. To avoid a deadlock
* on the hash_lock always set and clear the bit.
*/
fstrans_cookie_t cookie = spl_fstrans_mark();
top:
if (!embedded_bp) {
/*
* Embedded BP's have no DVA and require no I/O to "read".
* Create an anonymous arc buf to back it.
*/
if (!zfs_blkptr_verify(spa, bp, zio_flags &
ZIO_FLAG_CONFIG_WRITER, BLK_VERIFY_LOG)) {
rc = SET_ERROR(ECKSUM);
goto out;
}
hdr = buf_hash_find(guid, bp, &hash_lock);
}
/*
* Determine if we have an L1 cache hit or a cache miss. For simplicity
* we maintain encrypted data separately from compressed / uncompressed
* data. If the user is requesting raw encrypted data and we don't have
* that in the header we will read from disk to guarantee that we can
* get it even if the encryption keys aren't loaded.
*/
if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) ||
(hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
arc_buf_t *buf = NULL;
*arc_flags |= ARC_FLAG_CACHED;
if (HDR_IO_IN_PROGRESS(hdr)) {
zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_cached_only_in_progress);
rc = SET_ERROR(ENOENT);
goto out;
}
ASSERT3P(head_zio, !=, NULL);
if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
priority == ZIO_PRIORITY_SYNC_READ) {
/*
* This is a sync read that needs to wait for
* an in-flight async read. Request that the
* zio have its priority upgraded.
*/
zio_change_priority(head_zio, priority);
DTRACE_PROBE1(arc__async__upgrade__sync,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_async_upgrade_sync);
}
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
if (*arc_flags & ARC_FLAG_WAIT) {
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
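/*
 * Piggy-back on the in-flight read: queue our callback on the hdr so
 * arc_read_done() will invoke it, and chain a dummy zio to pio so the
 * parent can still wait on (and see the error of) this read.
 */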
if (done) {
arc_callback_t *acb = NULL;
acb = kmem_zalloc(sizeof (arc_callback_t),
KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_nobuf = no_buf;
acb->acb_zb = *zb;
if (pio != NULL)
acb->acb_zio_dummy = zio_null(pio,
spa, NULL, NULL, NULL, zio_flags);
ASSERT3P(acb->acb_done, !=, NULL);
acb->acb_zio_head = head_zio;
acb->acb_next = hdr->b_l1hdr.b_acb;
hdr->b_l1hdr.b_acb = acb;
}
mutex_exit(hash_lock);
goto out;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu);
if (done && !no_buf) {
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
/*
* This is a demand read which does not have to
* wait for i/o because we did a predictive
* prefetch i/o for it, which has completed.
*/
DTRACE_PROBE1(
arc__demand__hit__predictive__prefetch,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(
arcstat_demand_hit_predictive_prefetch);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
if (hdr->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) {
ARCSTAT_BUMP(
arcstat_demand_hit_prescient_prefetch);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PRESCIENT_PREFETCH);
}
ASSERT(!embedded_bp || !BP_IS_HOLE(bp));
/* Get a buf with the desired data in it. */
rc = arc_buf_alloc_impl(hdr, spa, zb, private,
encrypted_read, compressed_read, noauth_read,
B_TRUE, &buf);
if (rc == ECKSUM) {
/*
* Convert authentication and decryption errors
* to EIO (and generate an ereport if needed)
* before leaving the ARC.
*/
rc = SET_ERROR(EIO);
if ((zio_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, zb);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
}
if (rc != 0) {
(void) remove_reference(hdr, hash_lock,
private);
arc_buf_destroy_impl(buf);
buf = NULL;
}
/* assert any errors weren't due to unloaded keys */
ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
rc != EACCES);
} else if (*arc_flags & ARC_FLAG_PREFETCH &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
data, metadata, hits);
if (done)
done(NULL, zb, bp, buf, private);
} else {
uint64_t lsize = BP_GET_LSIZE(bp);
uint64_t psize = BP_GET_PSIZE(bp);
arc_callback_t *acb;
vdev_t *vd = NULL;
uint64_t addr = 0;
boolean_t devw = B_FALSE;
uint64_t size;
abd_t *hdr_abd;
int alloc_flags = encrypted_read ? ARC_HDR_ALLOC_RDATA : 0;
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
rc = SET_ERROR(ENOENT);
if (hash_lock != NULL)
mutex_exit(hash_lock);
goto out;
}
if (hdr == NULL) {
/*
* This block is not in the cache or it has
* embedded data.
*/
arc_buf_hdr_t *exists = NULL;
arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), 0, type,
encrypted_read);
if (!embedded_bp) {
hdr->b_dva = *BP_IDENTITY(bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
exists = buf_hash_insert(hdr, &hash_lock);
}
if (exists != NULL) {
/* somebody beat us to the hash insert */
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_hdr_destroy(hdr);
goto top; /* restart the IO request */
}
} else {
/*
* This block is in the ghost cache or encrypted data
* was requested and we didn't have it. If it was
* L2-only (and thus didn't have an L1 hdr),
* we realloc the header to add an L1 hdr.
*/
if (!HDR_HAS_L1HDR(hdr)) {
hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
hdr_full_cache);
}
if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(zfs_refcount_count(
&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
} else if (HDR_IO_IN_PROGRESS(hdr)) {
/*
* If this header already had an IO in progress
* and we are performing another IO to fetch
* encrypted data we must wait until the first
* IO completes so as not to confuse
* arc_read_done(). This should be very rare
* and so the performance impact shouldn't
* matter.
*/
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* This is a delicate dance that we play here.
* This hdr might be in the ghost list so we access
* it to move it out of the ghost list before we
* initiate the read. If it's a prefetch then
* it won't have a callback so we'll remove the
* reference that arc_buf_alloc_impl() created. We
* do this after we've called arc_access() to
* avoid hitting an assert in remove_reference().
*/
arc_adapt(arc_hdr_size(hdr), hdr->b_l1hdr.b_state);
arc_access(hdr, hash_lock);
arc_hdr_alloc_abd(hdr, alloc_flags);
}
if (encrypted_read) {
ASSERT(HDR_HAS_RABD(hdr));
size = HDR_GET_PSIZE(hdr);
hdr_abd = hdr->b_crypt_hdr.b_rabd;
zio_flags |= ZIO_FLAG_RAW;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
size = arc_hdr_size(hdr);
hdr_abd = hdr->b_l1hdr.b_pabd;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* For authenticated bp's, we do not ask the ZIO layer
* to authenticate them since this will cause the entire
* IO to fail if the key isn't loaded. Instead, we
* defer authentication until arc_buf_fill(), which will
* verify the data when the key is available.
*/
if (BP_IS_AUTHENTICATED(bp))
zio_flags |= ZIO_FLAG_RAW_ENCRYPT;
}
if (*arc_flags & ARC_FLAG_PREFETCH &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (BP_IS_AUTHENTICATED(bp))
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
if (BP_GET_LEVEL(bp) > 0)
arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH);
ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_zb = *zb;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
hdr->b_l1hdr.b_acb = acb;
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (HDR_HAS_L2HDR(hdr) &&
(vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
devw = hdr->b_l2hdr.b_dev->l2ad_writing;
addr = hdr->b_l2hdr.b_daddr;
/*
* Lock out L2ARC device removal.
*/
if (vdev_is_dead(vd) ||
!spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
vd = NULL;
}
/*
* We count both async reads and scrub IOs as asynchronous so
* that both can be upgraded in the event of a cache hit while
* the read IO is still in-flight.
*/
if (priority == ZIO_PRIORITY_ASYNC_READ ||
priority == ZIO_PRIORITY_SCRUB)
arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
else
arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
/*
* At this point, we have a level 1 cache miss or a blkptr
* with embedded data. Try again in L2ARC if possible.
*/
ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
/*
* Skip ARC stat bump for block pointers with embedded
* data. The data are read from the blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr,
blkptr_t *, bp, uint64_t, lsize,
zbookmark_phys_t *, zb);
ARCSTAT_BUMP(arcstat_misses);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data,
metadata, misses);
zfs_racct_read(size, 1);
}
/* Check if the spa even has l2 configured */
const boolean_t spa_has_l2 = l2arc_ndev != 0 &&
spa->spa_l2cache.sav_count > 0;
if (vd != NULL && spa_has_l2 && !(l2arc_norw && devw)) {
/*
* Read from the L2ARC if the following are true:
* 1. The L2ARC vdev was previously cached.
* 2. This buffer still has L2ARC metadata.
* 3. This buffer isn't currently writing to the L2ARC.
* 4. The L2ARC entry wasn't evicted, which may
* also have invalidated the vdev.
* 5. This isn't prefetch or l2arc_noprefetch is 0.
*/
if (HDR_HAS_L2HDR(hdr) &&
!HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
!(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
l2arc_read_callback_t *cb;
abd_t *abd;
uint64_t asize;
DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_hits);
atomic_inc_32(&hdr->b_l2hdr.b_hits);
cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
KM_SLEEP);
cb->l2rcb_hdr = hdr;
cb->l2rcb_bp = *bp;
cb->l2rcb_zb = *zb;
cb->l2rcb_flags = zio_flags;
/*
* When Compressed ARC is disabled, but the
* L2ARC block is compressed, arc_hdr_size()
* will have returned LSIZE rather than PSIZE.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr) &&
HDR_GET_PSIZE(hdr) != 0) {
size = HDR_GET_PSIZE(hdr);
}
asize = vdev_psize_to_asize(vd, size);
if (asize != size) {
abd = abd_alloc_for_io(asize,
HDR_ISTYPE_METADATA(hdr));
cb->l2rcb_abd = abd;
} else {
abd = hdr_abd;
}
ASSERT(addr >= VDEV_LABEL_START_SIZE &&
addr + asize <= vd->vdev_psize -
VDEV_LABEL_END_SIZE);
/*
* l2arc read. The SCL_L2ARC lock will be
* released by l2arc_read_done().
* Issue a null zio if the underlying buffer
* was squashed to zero size by compression.
*/
ASSERT3U(arc_hdr_get_compress(hdr), !=,
ZIO_COMPRESS_EMPTY);
rzio = zio_read_phys(pio, vd, addr,
asize, abd,
ZIO_CHECKSUM_OFF,
l2arc_read_done, cb, priority,
zio_flags | ZIO_FLAG_DONT_CACHE |
ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY, B_FALSE);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
zio_t *, rzio);
ARCSTAT_INCR(arcstat_l2_read_bytes,
HDR_GET_PSIZE(hdr));
if (*arc_flags & ARC_FLAG_NOWAIT) {
zio_nowait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_WAIT);
if (zio_wait(rzio) == 0)
goto out;
/* l2arc read error; goto zio_read() */
if (hash_lock != NULL)
mutex_enter(hash_lock);
} else {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
if (HDR_L2_WRITING(hdr))
ARCSTAT_BUMP(arcstat_l2_rw_clash);
spa_config_exit(spa, SCL_L2ARC, vd);
}
} else {
if (vd != NULL)
spa_config_exit(spa, SCL_L2ARC, vd);
/*
* Only a spa with l2 should contribute to l2
* miss stats. (Including the case of having a
* faulted cache device - that's also a miss.)
*/
if (spa_has_l2) {
/*
* Skip ARC stat bump for block pointers with
* embedded data. The data are read from the
* blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
}
}
}
rzio = zio_read(pio, spa, bp, hdr_abd, size,
arc_read_done, hdr, priority, zio_flags, zb);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
if (*arc_flags & ARC_FLAG_WAIT) {
rc = zio_wait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
zio_nowait(rzio);
}
out:
/* embedded bps don't actually go to disk */
if (!embedded_bp)
spa_read_history_add(spa, zb, *arc_flags);
spl_fstrans_unmark(cookie);
return (rc);
}
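/*
 * Register a callback that the ARC may invoke (via arc_prune_taskq) when it
 * needs help shedding metadata, typically so a platform layer can release
 * objects it is pinning in the cache. The returned handle must later be
 * passed to arc_remove_prune_callback().
 */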
arc_prune_t *
arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
arc_prune_t *p;
p = kmem_alloc(sizeof (*p), KM_SLEEP);
p->p_pfunc = func;
p->p_private = private;
list_link_init(&p->p_node);
zfs_refcount_create(&p->p_refcnt);
mutex_enter(&arc_prune_mtx);
zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
list_insert_head(&arc_prune_list, p);
mutex_exit(&arc_prune_mtx);
return (p);
}
void
arc_remove_prune_callback(arc_prune_t *p)
{
boolean_t wait = B_FALSE;
mutex_enter(&arc_prune_mtx);
list_remove(&arc_prune_list, p);
if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
wait = B_TRUE;
mutex_exit(&arc_prune_mtx);
/* wait for arc_prune_task to finish */
if (wait)
taskq_wait_outstanding(arc_prune_taskq, 0);
ASSERT0(zfs_refcount_count(&p->p_refcnt));
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
/*
* Notify the arc that a block was freed, and thus will never be used again.
*/
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint64_t guid = spa_load_guid(spa);
ASSERT(!BP_IS_EMBEDDED(bp));
hdr = buf_hash_find(guid, bp, &hash_lock);
if (hdr == NULL)
return;
/*
* We might be trying to free a block that is still doing I/O
* (i.e. prefetch) or has a reference (i.e. a dedup-ed,
* dmu_sync-ed block). If this block is being prefetched, then it
* would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
* until the I/O completes. A block may also have a reference if it is
* part of a dedup-ed, dmu_synced write. The dmu_sync() function would
* have written the new block to its final resting place on disk but
* without the dedup flag set. This would have left the hdr in the MRU
* state and discoverable. When the txg finally syncs it detects that
* the block was overridden in open context and issues an override I/O.
* Since this is a dedup block, the override I/O will determine if the
* block is already in the DDT. If so, then it will replace the io_bp
* with the bp from the DDT and allow the I/O to finish. When the I/O
* reaches the done callback, dbuf_write_override_done, it will
* check to see if the io_bp and io_bp_override are identical.
* If they are not, then it indicates that the bp was replaced with
* the bp in the DDT and the override bp is freed. This allows
* us to arrive here with a reference on a block that is being
* freed. So if we have an I/O in progress, or a reference to
* this hdr, then we don't destroy the hdr.
*/
if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
mutex_exit(hash_lock);
} else {
mutex_exit(hash_lock);
}
}
/*
* Release this buffer from the cache, making it an anonymous buffer. This
* must be done after a read and prior to modifying the buffer contents.
* If the buffer has more than one reference, we must make
* a new hdr for the buffer.
*/
void
arc_release(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* It would be nice to assert that if it's DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
* But we don't know that information at this level.
*/
mutex_enter(&buf->b_evict_lock);
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
* linked into the hash table.
*/
if (hdr->b_l1hdr.b_state == arc_anon) {
mutex_exit(&buf->b_evict_lock);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT(HDR_EMPTY(hdr));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
hdr->b_l1hdr.b_arc_access = 0;
/*
* If the buf is being overridden then it may already
* have a hdr that is not empty.
*/
buf_discard_identity(hdr);
arc_buf_thaw(buf);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
/*
* This assignment is only valid as long as the hash_lock is
* held; we must be careful not to reference 'state' or the
* b_state field after dropping the lock.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(state, !=, arc_anon);
/* this buffer is not on any list */
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
if (HDR_HAS_L2HDR(hdr)) {
mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
/*
* We have to recheck this conditional again now that
* we're holding the l2ad_mtx to prevent a race with
* another thread which might be concurrently calling
* l2arc_evict(). In that case, l2arc_evict() might have
* destroyed the header's L2 portion as we were waiting
* to acquire the l2ad_mtx.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
}
/*
* Do we have more than one buf?
*/
if (hdr->b_l1hdr.b_bufcnt > 1) {
arc_buf_hdr_t *nhdr;
uint64_t spa = hdr->b_spa;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t lsize = HDR_GET_LSIZE(hdr);
boolean_t protected = HDR_PROTECTED(hdr);
enum zio_compress compress = arc_hdr_get_compress(hdr);
arc_buf_contents_t type = arc_buf_type(hdr);
VERIFY3U(hdr->b_type, ==, type);
ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
(void) remove_reference(hdr, hash_lock, tag);
if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
ASSERT(ARC_BUF_LAST(buf));
}
/*
* Pull the data off of this hdr and attach it to
* a new anonymous hdr. Also find the last buffer
* in the hdr's buffer list.
*/
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
ASSERT3P(lastbuf, !=, NULL);
/*
* If the current arc_buf_t and the hdr are sharing their data
* buffer, then we must stop sharing that block.
*/
if (arc_buf_is_shared(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
VERIFY(!arc_buf_is_shared(lastbuf));
/*
* First, sever the block sharing relationship between
* buf and the arc_buf_hdr_t.
*/
arc_unshare_buf(hdr, buf);
/*
* Now we need to recreate the hdr's b_pabd. Since we
* have lastbuf handy, we try to share with it, but if
* we can't then we allocate a new b_pabd and copy the
* data from buf into it.
*/
if (arc_can_share(hdr, lastbuf)) {
arc_share_buf(hdr, lastbuf);
} else {
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
buf->b_data, psize);
}
VERIFY3P(lastbuf->b_data, !=, NULL);
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared, so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
ASSERT(!ARC_BUF_SHARED(buf));
}
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_size,
arc_buf_size(buf), buf);
if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(
&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf))
hdr->b_crypt_hdr.b_ebufcnt -= 1;
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
/* if this is the last uncompressed buf free the checksum */
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
mutex_exit(hash_lock);
/*
* Allocate a new hdr. The new hdr will contain a b_pabd
* buffer which will be freed in arc_write().
*/
nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
compress, hdr->b_complevel, type, HDR_HAS_RABD(hdr));
ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(nhdr->b_l1hdr.b_bufcnt);
ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
nhdr->b_l1hdr.b_buf = buf;
nhdr->b_l1hdr.b_bufcnt = 1;
if (ARC_BUF_ENCRYPTED(buf))
nhdr->b_crypt_hdr.b_ebufcnt = 1;
nhdr->b_l1hdr.b_mru_hits = 0;
nhdr->b_l1hdr.b_mru_ghost_hits = 0;
nhdr->b_l1hdr.b_mfu_hits = 0;
nhdr->b_l1hdr.b_mfu_ghost_hits = 0;
nhdr->b_l1hdr.b_l2_hits = 0;
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
(void) zfs_refcount_add_many(&arc_anon->arcs_size,
arc_buf_size(buf), buf);
} else {
mutex_exit(&buf->b_evict_lock);
ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
/* protected by hash lock, or hdr is on arc_anon */
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_l2_hits = 0;
arc_change_state(arc_anon, hdr, hash_lock);
hdr->b_l1hdr.b_arc_access = 0;
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_buf_thaw(buf);
}
}
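/*
 * Return nonzero if arc_release() has been called on this buffer, i.e. it is
 * anonymous and its contents may be modified by the holder.
 */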
int
arc_released(arc_buf_t *buf)
{
int released;
mutex_enter(&buf->b_evict_lock);
released = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_state == arc_anon);
mutex_exit(&buf->b_evict_lock);
return (released);
}
#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
int referenced;
mutex_enter(&buf->b_evict_lock);
referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
mutex_exit(&buf->b_evict_lock);
return (referenced);
}
#endif
static void
arc_write_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
/*
* If we're reexecuting this zio because the pool suspended, then
* clean up any state that was previously set the first time the
* callback was invoked.
*/
if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
if (hdr->b_l1hdr.b_pabd != NULL) {
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT(!arc_buf_is_shared(buf));
callback->awcb_ready(zio, buf, callback->awcb_private);
if (HDR_IO_IN_PROGRESS(hdr))
ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (BP_IS_PROTECTED(bp) != !!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, BP_IS_PROTECTED(bp));
if (BP_IS_PROTECTED(bp)) {
/* ZIL blocks are written through zio_rewrite */
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(HDR_PROTECTED(hdr));
if (BP_SHOULD_BYTESWAP(bp)) {
if (BP_GET_LEVEL(bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
}
/*
* If this block was written for raw encryption but the zio layer
* ended up only authenticating it, adjust the buffer flags now.
*/
if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
} else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) {
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/* this must be done after the buffer flags are adjusted */
arc_cksum_compute(buf);
enum zio_compress compress;
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
compress = ZIO_COMPRESS_OFF;
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
compress = BP_GET_COMPRESS(bp);
}
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = zio->io_prop.zp_complevel;
if (zio->io_error != 0 || psize == 0)
goto out;
/*
* Fill the hdr with data. If the buffer is encrypted we have no choice
* but to copy the data into b_rabd. If the hdr is compressed, the data
* we want is available from the zio; otherwise we can take it from
* the buf.
*
* We might be able to share the buf's data with the hdr here. However,
* doing so would cause the ARC to be full of linear ABDs if we write a
* lot of shareable data. As a compromise, we check whether an ABD of this
* size would be allocated as a scattered ABD, and assume that if it would
* then the user wants the ARC to be primarily filled with scattered ABDs
* regardless of the data being written. Therefore, in that case we allocate
* one and copy the data into it; otherwise, we share the data directly if
* we can.
*/
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(psize, >, 0);
ASSERT(ARC_BUF_COMPRESSED(buf));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT|ARC_HDR_ALLOC_RDATA);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
- } else if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
+ } else if (!abd_size_alloc_linear(arc_buf_size(buf)) ||
+ !arc_can_share(hdr, buf)) {
/*
* Ideally, we would always copy the io_abd into b_pabd, but the
* user may have disabled compressed ARC, thus we must check the
* hdr's compression setting rather than the io_bp's.
*/
if (BP_IS_ENCRYPTED(bp)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr,
ARC_HDR_DO_ADAPT|ARC_HDR_ALLOC_RDATA);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
!ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
} else {
ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
}
} else {
ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
arc_share_buf(hdr, buf);
}
out:
arc_hdr_verify(hdr, bp);
spl_fstrans_unmark(cookie);
}
static void
arc_write_children_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
callback->awcb_children_ready(zio, buf, callback->awcb_private);
}
/*
* The SPA calls this callback for each physical write that happens on behalf
* of a logical write. See the comment in dbuf_write_physdone() for details.
*/
static void
arc_write_physdone(zio_t *zio)
{
arc_write_callback_t *cb = zio->io_private;
if (cb->awcb_physdone != NULL)
cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
}
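/*
 * ZIO done callback for arc_write(): if a physical block was written, give
 * the hdr its new identity and insert it into the hash table (handling the
 * sync-to-convergence, nopwrite and dedup collision cases), then invoke the
 * caller's awcb_done callback.
 */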
static void
arc_write_done(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
buf_discard_identity(hdr);
} else {
hdr->b_dva = *BP_IDENTITY(zio->io_bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
}
} else {
ASSERT(HDR_EMPTY(hdr));
}
/*
* If the block to be written was all-zero or compressed enough to be
* embedded in the BP, no write was performed so there will be no
* dva/birth/checksum. The buffer must therefore remain anonymous
* (and uncached).
*/
if (!HDR_EMPTY(hdr)) {
arc_buf_hdr_t *exists;
kmutex_t *hash_lock;
ASSERT3U(zio->io_error, ==, 0);
arc_cksum_verify(buf);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists != NULL) {
/*
* This can only happen if we overwrite for
* sync-to-convergence, because we remove
* buffers from the hash table when we arc_free().
*/
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad overwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
ASSERT(zfs_refcount_is_zero(
&exists->b_l1hdr.b_refcnt));
arc_change_state(arc_anon, exists, hash_lock);
arc_hdr_destroy(exists);
mutex_exit(hash_lock);
exists = buf_hash_insert(hdr, &hash_lock);
ASSERT3P(exists, ==, NULL);
} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
/* nopwrite */
ASSERT(zio->io_prop.zp_nopwrite);
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad nopwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
} else {
/* Dedup */
ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(BP_GET_DEDUP(zio->io_bp));
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
/* if it's not anon, we are doing a scrub */
if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
} else {
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
}
ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
callback->awcb_done(zio, buf, callback->awcb_private);
abd_free(zio->io_abd);
kmem_free(callback, sizeof (arc_write_callback_t));
}
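/*
 * Set up the write of an ARC buffer (normally one that has been
 * arc_release()d): sever any data sharing with the hdr, translate the
 * buffer's encryption/compression state into zio properties, and issue a
 * zio_write() whose stages call back into arc_write_ready(),
 * arc_write_physdone() and arc_write_done() above.
 */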
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc,
const zio_prop_t *zp, arc_write_done_func_t *ready,
arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone,
arc_write_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
arc_write_callback_t *callback;
zio_t *zio;
zio_prop_t localprop = *zp;
ASSERT3P(ready, !=, NULL);
ASSERT3P(done, !=, NULL);
ASSERT(!HDR_IO_ERROR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
if (l2arc)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
localprop.zp_encrypt = B_TRUE;
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
localprop.zp_byteorder =
(hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
bcopy(hdr->b_crypt_hdr.b_salt, localprop.zp_salt,
ZIO_DATA_SALT_LEN);
bcopy(hdr->b_crypt_hdr.b_iv, localprop.zp_iv,
ZIO_DATA_IV_LEN);
bcopy(hdr->b_crypt_hdr.b_mac, localprop.zp_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
localprop.zp_nopwrite = B_FALSE;
localprop.zp_copies =
MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
callback->awcb_ready = ready;
callback->awcb_children_ready = children_ready;
callback->awcb_physdone = physdone;
callback->awcb_done = done;
callback->awcb_private = private;
callback->awcb_buf = buf;
/*
* The hdr's b_pabd is now stale; free it now. A new data block
* will be allocated when the zio pipeline calls arc_write_ready().
*/
if (hdr->b_l1hdr.b_pabd != NULL) {
/*
* If the buf is currently sharing the data block with
* the hdr then we need to break that relationship here.
* The hdr will remain with a NULL data pointer and the
* buf will take sole ownership of the block.
*/
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
VERIFY3P(buf->b_data, !=, NULL);
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
if (!(zio_flags & ZIO_FLAG_RAW))
arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
zio = zio_write(pio, spa, txg, bp,
abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
(children_ready != NULL) ? arc_write_children_ready : NULL,
arc_write_physdone, arc_write_done, callback,
priority, zio_flags, zb);
return (zio);
}
void
arc_tempreserve_clear(uint64_t reserve)
{
atomic_add_64(&arc_tempreserve, -reserve);
ASSERT((int64_t)arc_tempreserve >= 0);
}
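/*
 * Reserve headroom for a TXG's worth of anonymous dirty data. Returns
 * ERESTART (or an error from arc_memory_throttle()) when the caller should
 * back off and retry; the DMU transaction code uses this to throttle dirty
 * data against the ARC.
 */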
int
arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
{
int error;
uint64_t anon_size;
if (!arc_no_grow &&
reserve > arc_c/4 &&
reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
arc_c = MIN(arc_c_max, reserve * 4);
/*
* Throttle when the calculated memory footprint for the TXG
* exceeds the target ARC size.
*/
if (reserve > arc_c) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
return (SET_ERROR(ERESTART));
}
/*
* Don't count loaned bufs as in flight dirty data to prevent long
* network delays from blocking transactions that are ready to be
* assigned to a txg.
*/
/* assert that it has not wrapped around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
arc_loaned_bytes), 0);
/*
* Writes will almost always require additional memory allocations
* in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
error = arc_memory_throttle(spa, reserve, txg);
if (error != 0)
return (error);
/*
* Throttle writes when the amount of dirty data in the cache
* gets too large. We try to keep the cache less than half full
* of dirty blocks so that our sync times don't grow too large.
*
* In the case of one pool being built on another pool, we want
* to make sure we don't end up throttling the lower (backing)
* pool when the upper pool is the majority contributor to dirty
* data. To ensure we make forward progress during throttling, we
* also check the current pool's net dirty data and only throttle
* if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty
* data in the cache.
*
* Note: if two requests come in concurrently, we might let them
* both succeed, when one of them should fail. Not a huge deal.
*/
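/*
 * A numeric sketch, assuming default tunables of 50% (dirty), 25% (anon)
 * and 20% (per-pool dirty): with rarc_c = 4 GiB, writes are throttled only
 * if total_dirty exceeds 2 GiB, anon_size exceeds 1 GiB, and this pool's
 * own dirty data exceeds 20% of anon_size; all three conditions must hold.
 */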
uint64_t total_dirty = reserve + arc_tempreserve + anon_size;
uint64_t spa_dirty_anon = spa_dirty_data(spa);
uint64_t rarc_c = arc_warm ? arc_c : arc_c_max;
if (total_dirty > rarc_c * zfs_arc_dirty_limit_percent / 100 &&
anon_size > rarc_c * zfs_arc_anon_limit_percent / 100 &&
spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
#ifdef ZFS_DEBUG
uint64_t meta_esize = zfs_refcount_count(
&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
uint64_t data_esize =
zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK rarc_c=%lluK\n",
(u_longlong_t)arc_tempreserve >> 10,
(u_longlong_t)meta_esize >> 10,
(u_longlong_t)data_esize >> 10,
(u_longlong_t)reserve >> 10,
(u_longlong_t)rarc_c >> 10);
#endif
DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&arc_tempreserve, reserve);
return (0);
}
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
size->value.ui64 = zfs_refcount_count(&state->arcs_size);
evict_data->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
evict_metadata->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
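/*
 * kstat update handler: snapshot the wmsum/aggsum counters and the per-state
 * sizes into the arcstats structure. The kstat is read-only; attempts to
 * write it return EACCES.
 */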
static int
arc_kstat_update(kstat_t *ksp, int rw)
{
arc_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
as->arcstat_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_hits);
as->arcstat_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_misses);
as->arcstat_demand_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_hits);
as->arcstat_demand_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_misses);
as->arcstat_demand_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_hits);
as->arcstat_demand_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_misses);
as->arcstat_prefetch_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_hits);
as->arcstat_prefetch_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_misses);
as->arcstat_prefetch_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_hits);
as->arcstat_prefetch_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_misses);
as->arcstat_mru_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_hits);
as->arcstat_mru_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_ghost_hits);
as->arcstat_mfu_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_hits);
as->arcstat_mfu_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_ghost_hits);
as->arcstat_deleted.value.ui64 =
wmsum_value(&arc_sums.arcstat_deleted);
as->arcstat_mutex_miss.value.ui64 =
wmsum_value(&arc_sums.arcstat_mutex_miss);
as->arcstat_access_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_access_skip);
as->arcstat_evict_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_skip);
as->arcstat_evict_not_enough.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_not_enough);
as->arcstat_evict_l2_cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_cached);
as->arcstat_evict_l2_eligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible);
as->arcstat_evict_l2_eligible_mfu.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mfu);
as->arcstat_evict_l2_eligible_mru.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mru);
as->arcstat_evict_l2_ineligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_ineligible);
as->arcstat_evict_l2_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_skip);
as->arcstat_hash_collisions.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_collisions);
as->arcstat_hash_chains.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_chains);
as->arcstat_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_size);
as->arcstat_compressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_compressed_size);
as->arcstat_uncompressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_uncompressed_size);
as->arcstat_overhead_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_overhead_size);
as->arcstat_hdr_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_hdr_size);
as->arcstat_data_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_data_size);
as->arcstat_metadata_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_metadata_size);
as->arcstat_dbuf_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_dbuf_size);
#if defined(COMPAT_FREEBSD11)
as->arcstat_other_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size) +
aggsum_value(&arc_sums.arcstat_dnode_size) +
wmsum_value(&arc_sums.arcstat_dbuf_size);
#endif
arc_kstat_update_state(arc_anon,
&as->arcstat_anon_size,
&as->arcstat_anon_evictable_data,
&as->arcstat_anon_evictable_metadata);
arc_kstat_update_state(arc_mru,
&as->arcstat_mru_size,
&as->arcstat_mru_evictable_data,
&as->arcstat_mru_evictable_metadata);
arc_kstat_update_state(arc_mru_ghost,
&as->arcstat_mru_ghost_size,
&as->arcstat_mru_ghost_evictable_data,
&as->arcstat_mru_ghost_evictable_metadata);
arc_kstat_update_state(arc_mfu,
&as->arcstat_mfu_size,
&as->arcstat_mfu_evictable_data,
&as->arcstat_mfu_evictable_metadata);
arc_kstat_update_state(arc_mfu_ghost,
&as->arcstat_mfu_ghost_size,
&as->arcstat_mfu_ghost_evictable_data,
&as->arcstat_mfu_ghost_evictable_metadata);
as->arcstat_dnode_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_dnode_size);
as->arcstat_bonus_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size);
as->arcstat_l2_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_hits);
as->arcstat_l2_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_misses);
as->arcstat_l2_prefetch_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_prefetch_asize);
as->arcstat_l2_mru_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mru_asize);
as->arcstat_l2_mfu_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mfu_asize);
as->arcstat_l2_bufc_data_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_data_asize);
as->arcstat_l2_bufc_metadata_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_metadata_asize);
as->arcstat_l2_feeds.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_feeds);
as->arcstat_l2_rw_clash.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rw_clash);
as->arcstat_l2_read_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_read_bytes);
as->arcstat_l2_write_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_write_bytes);
as->arcstat_l2_writes_sent.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_sent);
as->arcstat_l2_writes_done.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_done);
as->arcstat_l2_writes_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_error);
as->arcstat_l2_writes_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_lock_retry);
as->arcstat_l2_evict_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_lock_retry);
as->arcstat_l2_evict_reading.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_reading);
as->arcstat_l2_evict_l1cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_l1cached);
as->arcstat_l2_free_on_write.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_free_on_write);
as->arcstat_l2_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_abort_lowmem);
as->arcstat_l2_cksum_bad.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_cksum_bad);
as->arcstat_l2_io_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_io_error);
as->arcstat_l2_lsize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_lsize);
as->arcstat_l2_psize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_psize);
as->arcstat_l2_hdr_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_l2_hdr_size);
as->arcstat_l2_log_blk_writes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_writes);
as->arcstat_l2_log_blk_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_asize);
as->arcstat_l2_log_blk_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_count);
as->arcstat_l2_rebuild_success.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_success);
as->arcstat_l2_rebuild_abort_unsupported.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
as->arcstat_l2_rebuild_abort_io_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
as->arcstat_l2_rebuild_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
as->arcstat_l2_rebuild_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_size);
as->arcstat_l2_rebuild_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_asize);
as->arcstat_l2_rebuild_bufs.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs);
as->arcstat_l2_rebuild_bufs_precached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs_precached);
as->arcstat_l2_rebuild_log_blks.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_log_blks);
as->arcstat_memory_throttle_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_throttle_count);
as->arcstat_memory_direct_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_direct_count);
as->arcstat_memory_indirect_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_indirect_count);
as->arcstat_memory_all_bytes.value.ui64 =
arc_all_memory();
as->arcstat_memory_free_bytes.value.ui64 =
arc_free_memory();
as->arcstat_memory_available_bytes.value.i64 =
arc_available_memory();
as->arcstat_prune.value.ui64 =
wmsum_value(&arc_sums.arcstat_prune);
as->arcstat_meta_used.value.ui64 =
aggsum_value(&arc_sums.arcstat_meta_used);
as->arcstat_async_upgrade_sync.value.ui64 =
wmsum_value(&arc_sums.arcstat_async_upgrade_sync);
as->arcstat_demand_hit_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_predictive_prefetch);
as->arcstat_demand_hit_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_prescient_prefetch);
as->arcstat_raw_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_raw_size);
as->arcstat_cached_only_in_progress.value.ui64 =
wmsum_value(&arc_sums.arcstat_cached_only_in_progress);
as->arcstat_abd_chunk_waste_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_abd_chunk_waste_size);
return (0);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the ARC eviction
* code is laid out; arc_evict_state() assumes ARC buffers are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
arc_buf_hdr_t *hdr = obj;
/*
* We rely on b_dva to generate evenly distributed index
* numbers using buf_hash below. So, as an added precaution,
* let's make sure we never add empty buffers to the arc lists.
*/
ASSERT(!HDR_EMPTY(hdr));
/*
* The assumption here is that the hash value for a given
* arc_buf_hdr_t will remain constant throughout its lifetime
* (i.e. its b_spa, b_dva, and b_birth fields don't change).
* Thus, we don't need to store the header's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power-of-two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
multilist_get_num_sublists(ml));
}
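/*
* Warn (when do_warn is set) that a non-zero user-supplied tunable was
* ignored because a different value is currently in effect; the tunable
* name is stringified by the preprocessor and the in-effect value printed.
*/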
#define WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do { \
if ((do_warn) && (tuning) && ((tuning) != (value))) { \
cmn_err(CE_WARN, \
"ignoring tunable %s (using %llu instead)", \
- (#tuning), (value)); \
+ (#tuning), (u_longlong_t)(value)); \
} \
} while (0)
/*
* Called during module initialization and periodically thereafter to
* apply reasonable changes to the exposed performance tunings. Can also be
* called explicitly by param_set_arc_*() functions when ARC tunables are
* updated manually. Non-zero zfs_* values which differ from the currently set
* values will be applied.
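*
* Illustrative example (numbers assumed here, not taken from the code):
* with arc_c_max at 8 GiB, zfs_arc_meta_limit left at 0, and the default
* zfs_arc_meta_limit_percent of 75, arc_meta_limit is set to 6 GiB,
* provided that value falls within [arc_meta_min, arc_c_max].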
*/
void
arc_tuning_update(boolean_t verbose)
{
uint64_t allmem = arc_all_memory();
unsigned long limit;
/* Valid range: 32M - <arc_c_max> */
if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
(zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_min <= arc_c_max)) {
arc_c_min = zfs_arc_min;
arc_c = MAX(arc_c, arc_c_min);
}
WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, verbose);
/* Valid range: 64M - <all physical memory> */
if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
(zfs_arc_max >= 64 << 20) && (zfs_arc_max < allmem) &&
(zfs_arc_max > arc_c_min)) {
arc_c_max = zfs_arc_max;
arc_c = MIN(arc_c, arc_c_max);
arc_p = (arc_c >> 1);
if (arc_meta_limit > arc_c_max)
arc_meta_limit = arc_c_max;
if (arc_dnode_size_limit > arc_meta_limit)
arc_dnode_size_limit = arc_meta_limit;
}
WARN_IF_TUNING_IGNORED(zfs_arc_max, arc_c_max, verbose);
/* Valid range: 16M - <arc_c_max> */
if ((zfs_arc_meta_min) && (zfs_arc_meta_min != arc_meta_min) &&
(zfs_arc_meta_min >= 1ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_meta_min <= arc_c_max)) {
arc_meta_min = zfs_arc_meta_min;
if (arc_meta_limit < arc_meta_min)
arc_meta_limit = arc_meta_min;
if (arc_dnode_size_limit < arc_meta_min)
arc_dnode_size_limit = arc_meta_min;
}
WARN_IF_TUNING_IGNORED(zfs_arc_meta_min, arc_meta_min, verbose);
/* Valid range: <arc_meta_min> - <arc_c_max> */
limit = zfs_arc_meta_limit ? zfs_arc_meta_limit :
MIN(zfs_arc_meta_limit_percent, 100) * arc_c_max / 100;
if ((limit != arc_meta_limit) &&
(limit >= arc_meta_min) &&
(limit <= arc_c_max))
arc_meta_limit = limit;
WARN_IF_TUNING_IGNORED(zfs_arc_meta_limit, arc_meta_limit, verbose);
/* Valid range: <arc_meta_min> - <arc_meta_limit> */
limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
MIN(zfs_arc_dnode_limit_percent, 100) * arc_meta_limit / 100;
if ((limit != arc_dnode_size_limit) &&
(limit >= arc_meta_min) &&
(limit <= arc_meta_limit))
arc_dnode_size_limit = limit;
WARN_IF_TUNING_IGNORED(zfs_arc_dnode_limit, arc_dnode_size_limit,
verbose);
/* Valid range: 1 - N */
if (zfs_arc_grow_retry)
arc_grow_retry = zfs_arc_grow_retry;
/* Valid range: 1 - N */
if (zfs_arc_shrink_shift) {
arc_shrink_shift = zfs_arc_shrink_shift;
arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift - 1);
}
/* Valid range: 1 - N */
if (zfs_arc_p_min_shift)
arc_p_min_shift = zfs_arc_p_min_shift;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prefetch_ms)
arc_min_prefetch_ms = zfs_arc_min_prefetch_ms;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prescient_prefetch_ms) {
arc_min_prescient_prefetch_ms =
zfs_arc_min_prescient_prefetch_ms;
}
/* Valid range: 0 - 100 */
if ((zfs_arc_lotsfree_percent >= 0) &&
(zfs_arc_lotsfree_percent <= 100))
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent,
verbose);
/* Valid range: 0 - <all physical memory> */
if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
arc_sys_free = MIN(MAX(zfs_arc_sys_free, 0), allmem);
WARN_IF_TUNING_IGNORED(zfs_arc_sys_free, arc_sys_free, verbose);
}
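/*
* Allocate the multilists, eviction-size refcounts and wmsum/aggsum
* counters backing each ARC state and arcstats; arc_state_fini() tears
* them down again.
*/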
static void
arc_state_init(void)
{
multilist_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_anon->arcs_size);
zfs_refcount_create(&arc_mru->arcs_size);
zfs_refcount_create(&arc_mru_ghost->arcs_size);
zfs_refcount_create(&arc_mfu->arcs_size);
zfs_refcount_create(&arc_mfu_ghost->arcs_size);
zfs_refcount_create(&arc_l2c_only->arcs_size);
wmsum_init(&arc_sums.arcstat_hits, 0);
wmsum_init(&arc_sums.arcstat_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_data_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_data_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_mru_hits, 0);
wmsum_init(&arc_sums.arcstat_mru_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_deleted, 0);
wmsum_init(&arc_sums.arcstat_mutex_miss, 0);
wmsum_init(&arc_sums.arcstat_access_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_not_enough, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_cached, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mfu, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mru, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_ineligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_skip, 0);
wmsum_init(&arc_sums.arcstat_hash_collisions, 0);
wmsum_init(&arc_sums.arcstat_hash_chains, 0);
aggsum_init(&arc_sums.arcstat_size, 0);
wmsum_init(&arc_sums.arcstat_compressed_size, 0);
wmsum_init(&arc_sums.arcstat_uncompressed_size, 0);
wmsum_init(&arc_sums.arcstat_overhead_size, 0);
wmsum_init(&arc_sums.arcstat_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_data_size, 0);
wmsum_init(&arc_sums.arcstat_metadata_size, 0);
wmsum_init(&arc_sums.arcstat_dbuf_size, 0);
aggsum_init(&arc_sums.arcstat_dnode_size, 0);
wmsum_init(&arc_sums.arcstat_bonus_size, 0);
wmsum_init(&arc_sums.arcstat_l2_hits, 0);
wmsum_init(&arc_sums.arcstat_l2_misses, 0);
wmsum_init(&arc_sums.arcstat_l2_prefetch_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mru_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mfu_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_data_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_metadata_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_feeds, 0);
wmsum_init(&arc_sums.arcstat_l2_rw_clash, 0);
wmsum_init(&arc_sums.arcstat_l2_read_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_write_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_sent, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_done, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_error, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_reading, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_l1cached, 0);
wmsum_init(&arc_sums.arcstat_l2_free_on_write, 0);
wmsum_init(&arc_sums.arcstat_l2_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_cksum_bad, 0);
wmsum_init(&arc_sums.arcstat_l2_io_error, 0);
wmsum_init(&arc_sums.arcstat_l2_lsize, 0);
wmsum_init(&arc_sums.arcstat_l2_psize, 0);
aggsum_init(&arc_sums.arcstat_l2_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_writes, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_count, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_success, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_unsupported, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_io_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_dh_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_size, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs_precached, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_log_blks, 0);
wmsum_init(&arc_sums.arcstat_memory_throttle_count, 0);
wmsum_init(&arc_sums.arcstat_memory_direct_count, 0);
wmsum_init(&arc_sums.arcstat_memory_indirect_count, 0);
wmsum_init(&arc_sums.arcstat_prune, 0);
aggsum_init(&arc_sums.arcstat_meta_used, 0);
wmsum_init(&arc_sums.arcstat_async_upgrade_sync, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_raw_size, 0);
wmsum_init(&arc_sums.arcstat_cached_only_in_progress, 0);
wmsum_init(&arc_sums.arcstat_abd_chunk_waste_size, 0);
arc_anon->arcs_state = ARC_STATE_ANON;
arc_mru->arcs_state = ARC_STATE_MRU;
arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
arc_mfu->arcs_state = ARC_STATE_MFU;
arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
}
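/*
* Release everything allocated by arc_state_init(): per-state refcounts,
* multilists and the arcstat counters.
*/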
static void
arc_state_fini(void)
{
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_anon->arcs_size);
zfs_refcount_destroy(&arc_mru->arcs_size);
zfs_refcount_destroy(&arc_mru_ghost->arcs_size);
zfs_refcount_destroy(&arc_mfu->arcs_size);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_size);
zfs_refcount_destroy(&arc_l2c_only->arcs_size);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
wmsum_fini(&arc_sums.arcstat_hits);
wmsum_fini(&arc_sums.arcstat_misses);
wmsum_fini(&arc_sums.arcstat_demand_data_hits);
wmsum_fini(&arc_sums.arcstat_demand_data_misses);
wmsum_fini(&arc_sums.arcstat_demand_metadata_hits);
wmsum_fini(&arc_sums.arcstat_demand_metadata_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_data_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_data_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_misses);
wmsum_fini(&arc_sums.arcstat_mru_hits);
wmsum_fini(&arc_sums.arcstat_mru_ghost_hits);
wmsum_fini(&arc_sums.arcstat_mfu_hits);
wmsum_fini(&arc_sums.arcstat_mfu_ghost_hits);
wmsum_fini(&arc_sums.arcstat_deleted);
wmsum_fini(&arc_sums.arcstat_mutex_miss);
wmsum_fini(&arc_sums.arcstat_access_skip);
wmsum_fini(&arc_sums.arcstat_evict_skip);
wmsum_fini(&arc_sums.arcstat_evict_not_enough);
wmsum_fini(&arc_sums.arcstat_evict_l2_cached);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mfu);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mru);
wmsum_fini(&arc_sums.arcstat_evict_l2_ineligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_skip);
wmsum_fini(&arc_sums.arcstat_hash_collisions);
wmsum_fini(&arc_sums.arcstat_hash_chains);
aggsum_fini(&arc_sums.arcstat_size);
wmsum_fini(&arc_sums.arcstat_compressed_size);
wmsum_fini(&arc_sums.arcstat_uncompressed_size);
wmsum_fini(&arc_sums.arcstat_overhead_size);
wmsum_fini(&arc_sums.arcstat_hdr_size);
wmsum_fini(&arc_sums.arcstat_data_size);
wmsum_fini(&arc_sums.arcstat_metadata_size);
wmsum_fini(&arc_sums.arcstat_dbuf_size);
aggsum_fini(&arc_sums.arcstat_dnode_size);
wmsum_fini(&arc_sums.arcstat_bonus_size);
wmsum_fini(&arc_sums.arcstat_l2_hits);
wmsum_fini(&arc_sums.arcstat_l2_misses);
wmsum_fini(&arc_sums.arcstat_l2_prefetch_asize);
wmsum_fini(&arc_sums.arcstat_l2_mru_asize);
wmsum_fini(&arc_sums.arcstat_l2_mfu_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_data_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_metadata_asize);
wmsum_fini(&arc_sums.arcstat_l2_feeds);
wmsum_fini(&arc_sums.arcstat_l2_rw_clash);
wmsum_fini(&arc_sums.arcstat_l2_read_bytes);
wmsum_fini(&arc_sums.arcstat_l2_write_bytes);
wmsum_fini(&arc_sums.arcstat_l2_writes_sent);
wmsum_fini(&arc_sums.arcstat_l2_writes_done);
wmsum_fini(&arc_sums.arcstat_l2_writes_error);
wmsum_fini(&arc_sums.arcstat_l2_writes_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_reading);
wmsum_fini(&arc_sums.arcstat_l2_evict_l1cached);
wmsum_fini(&arc_sums.arcstat_l2_free_on_write);
wmsum_fini(&arc_sums.arcstat_l2_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_cksum_bad);
wmsum_fini(&arc_sums.arcstat_l2_io_error);
wmsum_fini(&arc_sums.arcstat_l2_lsize);
wmsum_fini(&arc_sums.arcstat_l2_psize);
aggsum_fini(&arc_sums.arcstat_l2_hdr_size);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_writes);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_asize);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_count);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_success);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_size);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_asize);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs_precached);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_log_blks);
wmsum_fini(&arc_sums.arcstat_memory_throttle_count);
wmsum_fini(&arc_sums.arcstat_memory_direct_count);
wmsum_fini(&arc_sums.arcstat_memory_indirect_count);
wmsum_fini(&arc_sums.arcstat_prune);
aggsum_fini(&arc_sums.arcstat_meta_used);
wmsum_fini(&arc_sums.arcstat_async_upgrade_sync);
wmsum_fini(&arc_sums.arcstat_demand_hit_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_hit_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_raw_size);
wmsum_fini(&arc_sums.arcstat_cached_only_in_progress);
wmsum_fini(&arc_sums.arcstat_abd_chunk_waste_size);
}
uint64_t
arc_target_bytes(void)
{
return (arc_c);
}
void
arc_set_limits(uint64_t allmem)
{
/* Set min cache to 1/32 of all memory, or 32MB, whichever is more. */
arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT);
/* How to set default max varies by platform. */
arc_c_max = arc_default_max(arc_c_min, allmem);
}
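/*
* One-time ARC setup: compute the size limits, apply user tunables, create
* the per-state structures, the arcstats kstat, the prune taskq and the
* eviction/reap zthrs, and derive the dirty data limits. Undone by
* arc_fini().
*/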
void
arc_init(void)
{
uint64_t percent, allmem = arc_all_memory();
mutex_init(&arc_evict_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&arc_evict_waiters, sizeof (arc_evict_waiter_t),
offsetof(arc_evict_waiter_t, aew_node));
arc_min_prefetch_ms = 1000;
arc_min_prescient_prefetch_ms = 6000;
#if defined(_KERNEL)
arc_lowmem_init();
#endif
arc_set_limits(allmem);
#ifndef _KERNEL
/*
* In userland, there's only the memory pressure that we artificially
* create (see arc_available_memory()). Don't let arc_c get too
* small, because it can cause transactions to be larger than
* arc_c, causing arc_tempreserve_space() to fail.
*/
arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT);
#endif
arc_c = arc_c_min;
arc_p = (arc_c >> 1);
/* Set arc_meta_min to 1/2 of the smallest allowed arc_c_min */
arc_meta_min = 1ULL << SPA_MAXBLOCKSHIFT;
/*
* Set arc_meta_limit to a percent of arc_c_max with a floor of
* arc_meta_min, and a ceiling of arc_c_max.
*/
percent = MIN(zfs_arc_meta_limit_percent, 100);
arc_meta_limit = MAX(arc_meta_min, (percent * arc_c_max) / 100);
percent = MIN(zfs_arc_dnode_limit_percent, 100);
arc_dnode_size_limit = (percent * arc_meta_limit) / 100;
/* Apply user specified tunings */
arc_tuning_update(B_TRUE);
/* if kmem_flags are set, let's try to use less memory */
if (kmem_debugging())
arc_c = arc_c / 2;
if (arc_c < arc_c_min)
arc_c = arc_c_min;
arc_register_hotplug();
arc_state_init();
buf_init();
list_create(&arc_prune_list, sizeof (arc_prune_t),
offsetof(arc_prune_t, p_node));
mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
arc_prune_taskq = taskq_create("arc_prune", 100, defclsyspri,
boot_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC |
TASKQ_THREADS_CPU_PCT);
arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (arc_ksp != NULL) {
arc_ksp->ks_data = &arc_stats;
arc_ksp->ks_update = arc_kstat_update;
kstat_install(arc_ksp);
}
arc_evict_zthr = zthr_create("arc_evict",
arc_evict_cb_check, arc_evict_cb, NULL);
arc_reap_zthr = zthr_create_timer("arc_reap",
arc_reap_cb_check, arc_reap_cb, NULL, SEC2NSEC(1));
arc_warm = B_FALSE;
/*
* Calculate maximum amount of dirty data per pool.
*
* If it has been set by a module parameter, take that.
* Otherwise, use a percentage of physical memory defined by
* zfs_dirty_data_max_percent (default 10%) with a cap at
* zfs_dirty_data_max_max (default 4G or 25% of physical memory).
*/
#ifdef __LP64__
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#else
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#endif
if (zfs_dirty_data_max == 0) {
zfs_dirty_data_max = allmem *
zfs_dirty_data_max_percent / 100;
zfs_dirty_data_max = MIN(zfs_dirty_data_max,
zfs_dirty_data_max_max);
}
if (zfs_wrlog_data_max == 0) {
/*
* dp_wrlog_total is reduced for each txg at the end of
* spa_sync(). However, dp_dirty_total is reduced every time
* a block is written out. Thus under normal operation,
* dp_wrlog_total could grow 2 times as big as
* zfs_dirty_data_max.
*/
zfs_wrlog_data_max = zfs_dirty_data_max * 2;
}
}
void
arc_fini(void)
{
arc_prune_t *p;
#ifdef _KERNEL
arc_lowmem_fini();
#endif /* _KERNEL */
/* Use B_TRUE to ensure *all* buffers are evicted */
arc_flush(NULL, B_TRUE);
if (arc_ksp != NULL) {
kstat_delete(arc_ksp);
arc_ksp = NULL;
}
taskq_wait(arc_prune_taskq);
taskq_destroy(arc_prune_taskq);
mutex_enter(&arc_prune_mtx);
while ((p = list_head(&arc_prune_list)) != NULL) {
list_remove(&arc_prune_list, p);
zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
mutex_exit(&arc_prune_mtx);
list_destroy(&arc_prune_list);
mutex_destroy(&arc_prune_mtx);
(void) zthr_cancel(arc_evict_zthr);
(void) zthr_cancel(arc_reap_zthr);
mutex_destroy(&arc_evict_lock);
list_destroy(&arc_evict_waiters);
/*
* Free any buffers that were tagged for destruction. This needs
* to occur before arc_state_fini() runs and destroys the aggsum
* values which are updated when freeing scatter ABDs.
*/
l2arc_do_free_on_write();
/*
* buf_fini() must precede arc_state_fini() because buf_fini() may
* trigger the release of kmem magazines, which can call back to
* arc_space_return(), which accesses aggsums freed in arc_state_fini().
*/
buf_fini();
arc_state_fini();
arc_unregister_hotplug();
/*
* We destroy the zthrs after all the ARC state has been
* torn down to avoid the case of them receiving any
* wakeup() signals after they are destroyed.
*/
zthr_destroy(arc_evict_zthr);
zthr_destroy(arc_reap_zthr);
ASSERT0(arc_loaned_bytes);
}
/*
* Level 2 ARC
*
* The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
* It uses dedicated storage devices to hold cached data, which are populated
* using large infrequent writes. The main role of this cache is to boost
* the performance of random read workloads. The intended L2ARC devices
* include short-stroked disks, solid state disks, and other media with
* substantially lower read latency than disk.
*
* +-----------------------+
* | ARC |
* +-----------------------+
* | ^ ^
* | | |
* l2arc_feed_thread() arc_read()
* | | |
* | l2arc read |
* V | |
* +---------------+ |
* | L2ARC | |
* +---------------+ |
* | ^ |
* l2arc_write() | |
* | | |
* V | |
* +-------+ +-------+
* | vdev | | vdev |
* | cache | | cache |
* +-------+ +-------+
* +=========+ .-----.
* : L2ARC : |-_____-|
* : devices : | Disks |
* +=========+ `-_____-'
*
* Read requests are satisfied from the following sources, in order:
*
* 1) ARC
* 2) vdev cache of L2ARC devices
* 3) L2ARC devices
* 4) vdev cache of disks
* 5) disks
*
* Some L2ARC device types exhibit extremely slow write performance.
* To accommodate this, there are some significant differences between
* the L2ARC and traditional cache design:
*
* 1. There is no eviction path from the ARC to the L2ARC. Evictions from
* the ARC behave as usual, freeing buffers and placing headers on ghost
* lists. The ARC does not send buffers to the L2ARC during eviction as
* this would add inflated write latencies for all ARC memory pressure.
*
* 2. The L2ARC attempts to cache data from the ARC before it is evicted.
* It does this by periodically scanning buffers from the eviction-end of
* the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
* not already there. It scans until a headroom of buffers is satisfied,
* which itself is a buffer for ARC eviction. If a compressible buffer is
* found during scanning and selected for writing to an L2ARC device, we
* temporarily boost scanning headroom during the next scan cycle to make
* sure we adapt to compression effects (which might significantly reduce
* the data volume we write to L2ARC). The thread that does this is
* l2arc_feed_thread(), illustrated below; example sizes are included to
* provide a better sense of ratio than this diagram:
*
* head --> tail
* +---------------------+----------+
* ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
* +---------------------+----------+ | o L2ARC eligible
* ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
* +---------------------+----------+ |
* 15.9 Gbytes ^ 32 Mbytes |
* headroom |
* l2arc_feed_thread()
* |
* l2arc write hand <--[oooo]--'
* | 8 Mbyte
* | write max
* V
* +==============================+
* L2ARC dev |####|#|###|###| |####| ... |
* +==============================+
* 32 Gbytes
*
* 3. If an ARC buffer is copied to the L2ARC but then hit instead of
* evicted, then the L2ARC has cached a buffer much sooner than it probably
* needed to, potentially wasting L2ARC device bandwidth and storage. It is
* safe to say that this is an uncommon case, since buffers at the end of
* the ARC lists have moved there due to inactivity.
*
* 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
* then the L2ARC simply misses copying some buffers. This serves as a
* pressure valve to prevent heavy read workloads from both stalling the ARC
* with waits and clogging the L2ARC with writes. This also helps prevent
* the potential for the L2ARC to churn if it attempts to cache content too
* quickly, such as during backups of the entire pool.
*
* 5. After system boot and before the ARC has filled main memory, there are
* no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
* lists can remain mostly static. Instead of searching from the tail of these
* lists as pictured, the l2arc_feed_thread() will search from the list heads
* for eligible buffers, greatly increasing its chance of finding them.
*
* The L2ARC device write speed is also boosted during this time so that
* the L2ARC warms up faster. Since there have been no ARC evictions yet,
* there are no L2ARC reads, and no fear of degrading read performance
* through increased writes.
*
* 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
* the vdev queue can aggregate them into larger and fewer writes. Each
* device is written to in a rotor fashion, sweeping writes through
* available space then repeating.
*
* 7. The L2ARC does not store dirty content. It never needs to flush
* write buffers back to disk based storage.
*
* 8. If an ARC buffer is written (and dirtied) which also exists in the
* L2ARC, the now stale L2ARC buffer is immediately dropped.
*
* The performance of the L2ARC can be tweaked by a number of tunables, which
* may be necessary for different workloads:
*
* l2arc_write_max max write bytes per interval
* l2arc_write_boost extra write bytes during device warmup
* l2arc_noprefetch skip caching prefetched buffers
* l2arc_headroom number of max device writes to precache
* l2arc_headroom_boost when we find compressed buffers during ARC
* scanning, we multiply headroom by this
* percentage factor for the next scan cycle,
* since more compressed buffers are likely to
* be present
* l2arc_feed_secs seconds between L2ARC writing
*
* Tunables may be removed or added as future performance improvements are
* integrated, and also may become zpool properties.
*
* There are three key functions that control how the L2ARC warms up:
*
* l2arc_write_eligible() check if a buffer is eligible to cache
* l2arc_write_size() calculate how much to write
* l2arc_write_interval() calculate sleep delay between writes
*
* These three functions determine what to write, how much, and how quickly
* to send writes.
*
* L2ARC persistence:
*
* When writing buffers to L2ARC, we periodically add some metadata to
* make sure we can pick them up after reboot, thus dramatically reducing
* the impact that any downtime has on the performance of storage systems
* with large caches.
*
* The implementation works fairly simply by integrating the following two
* modifications:
*
* *) When writing to the L2ARC, we occasionally write a "l2arc log block",
* which is an additional piece of metadata which describes what's been
* written. This allows us to rebuild the arc_buf_hdr_t structures of the
* main ARC buffers. There are two linked lists of log blocks headed by
* dh_start_lbps[2]. We alternate which chain we append to, so they are
* time-wise and offset-wise interleaved, but that is an optimization rather
* than a correctness requirement. The log block also includes a pointer to the
* previous block in its chain.
*
* *) We reserve SPA_MINBLOCKSIZE of space at the start of each L2ARC device
* for our header bookkeeping purposes. This contains a device header,
* which contains our top-level reference structures. We update it each
* time we write a new log block, so that we're able to locate it in the
* L2ARC device. If this write results in an inconsistent device header
* (e.g. due to power failure), we detect this by verifying the header's
* checksum and simply fail to reconstruct the L2ARC after reboot.
*
* Implementation diagram:
*
* +=== L2ARC device (not to scale) ======================================+
* | ___two newest log block pointers__.__________ |
* | / \dh_start_lbps[1] |
* | / \ \dh_start_lbps[0]|
* |.___/__. V V |
* ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
* || hdr| ^ /^ /^ / / |
* |+------+ ...--\-------/ \-----/--\------/ / |
* | \--------------/ \--------------/ |
* +======================================================================+
*
* As can be seen in the diagram, rather than using a simple linked list,
* we use a pair of linked lists with alternating elements. This is a
* performance enhancement: the address of the next log block is only
* known once the current block has been completely read in, so a single
* list would keep the device's I/O queue only one operation deep and
* incur a large amount of I/O round-trip latency. Having two lists
* allows us to fetch two log blocks ahead of where we are currently
* rebuilding L2ARC buffers.
*
* On-device data structures:
*
* L2ARC device header: l2arc_dev_hdr_phys_t
* L2ARC log block: l2arc_log_blk_phys_t
*
* L2ARC reconstruction:
*
* When writing data, we simply write in the standard rotary fashion,
* evicting buffers as we go and simply writing new data over them (writing
* a new log block every now and then). This obviously means that once we
* loop around the end of the device, we will start cutting into an already
* committed log block (and its referenced data buffers), like so:
*
* current write head__ __old tail
* \ /
* V V
* <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |-->
* ^ ^^^^^^^^^___________________________________
* | \
* <<nextwrite>> may overwrite this blk and/or its bufs --'
*
* When importing the pool, we detect this situation and use it to stop
* our scanning process (see l2arc_rebuild).
*
* There is one significant caveat to consider when rebuilding ARC contents
* from an L2ARC device: what about invalidated buffers? Given the above
* construction, we cannot update blocks which we've already written to amend
* them to remove buffers which were invalidated. Thus, during reconstruction,
* we might be populating the cache with buffers for data that's not on the
* main pool anymore, or may have been overwritten!
*
* As it turns out, this isn't a problem. Every arc_read request includes
* both the DVA and, crucially, the birth TXG of the BP the caller is
* looking for. So even if the cache were populated by completely rotten
* blocks for data that had been long deleted and/or overwritten, we'll
* never actually return bad data from the cache, since the DVA together
* with the birth TXG uniquely identifies a block in space and time; once
* created, a block is immutable on disk. The worst we can do is waste some
* time and memory at l2arc rebuild reconstructing outdated ARC
* entries that will get dropped from the l2arc as it is being updated
* with new blocks.
*
* L2ARC buffers that have been evicted by l2arc_evict() ahead of the write
* hand are not restored. This is done by saving the offset (in bytes)
* l2arc_evict() has evicted to in the L2ARC device header and taking it
* into account when restoring buffers.
*/
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
/*
* A buffer is *not* eligible for the L2ARC if it:
* 1. belongs to a different spa.
* 2. is already cached on the L2ARC.
* 3. has an I/O in progress (it may be an incomplete read).
* 4. is flagged not eligible (zfs property).
*/
if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
return (B_FALSE);
return (B_TRUE);
}
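/*
* Compute how many bytes to write to the cache device on this feed cycle:
* l2arc_write_max, plus l2arc_write_boost while arc_warm is not yet set,
* clamped so that the write plus its log block (and optional TRIM-ahead)
* overhead still fits within the device.
*/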
static uint64_t
l2arc_write_size(l2arc_dev_t *dev)
{
uint64_t size, dev_size, tsize;
/*
* Make sure our globals have meaningful values in case the user
* altered them.
*/
size = l2arc_write_max;
if (size == 0) {
cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
"be greater than zero, resetting it to the default (%d)",
L2ARC_WRITE_SIZE);
size = l2arc_write_max = L2ARC_WRITE_SIZE;
}
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
/*
* Make sure the write size does not exceed the size of the cache
* device. This is important in l2arc_evict(), otherwise infinite
* iteration can occur.
*/
dev_size = dev->l2ad_end - dev->l2ad_start;
tsize = size + l2arc_log_blk_overhead(size, dev);
if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0)
tsize += MAX(64 * 1024 * 1024,
(tsize * l2arc_trim_ahead) / 100);
if (tsize >= dev_size) {
cmn_err(CE_NOTE, "l2arc_write_max or l2arc_write_boost "
"plus the overhead of log blocks (persistent L2ARC, "
"%llu bytes) exceeds the size of the cache device "
"(guid %llu), resetting them to the default (%d)",
- l2arc_log_blk_overhead(size, dev),
- dev->l2ad_vdev->vdev_guid, L2ARC_WRITE_SIZE);
+ (u_longlong_t)l2arc_log_blk_overhead(size, dev),
+ (u_longlong_t)dev->l2ad_vdev->vdev_guid, L2ARC_WRITE_SIZE);
size = l2arc_write_max = l2arc_write_boost = L2ARC_WRITE_SIZE;
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
}
return (size);
}
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
clock_t interval, next, now;
/*
* If the ARC lists are busy, increase our write rate; if the
* lists are stale, idle back. This is achieved by checking
* how much we previously wrote - if it was more than half of
* what we wanted, schedule the next write much sooner.
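*
* With the default tunables (assumed here: l2arc_feed_secs = 1 and
* l2arc_feed_min_ms = 200) this allows up to five feed cycles per second
* while the feed thread is keeping up, and one per second otherwise.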
*/
if (l2arc_feed_again && wrote > (wanted / 2))
interval = (hz * l2arc_feed_min_ms) / 1000;
else
interval = hz * l2arc_feed_secs;
now = ddi_get_lbolt();
next = MAX(now, MIN(now + interval, began + interval));
return (next);
}
/*
* Cycle through L2ARC devices. This is how L2ARC load balances.
* If a device is returned, this also returns holding the spa config lock.
*/
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
l2arc_dev_t *first, *next = NULL;
/*
* Lock out the removal of spas (spa_namespace_lock), then removal
* of cache devices (l2arc_dev_mtx). Once a device has been selected,
* both locks will be dropped and a spa config lock held instead.
*/
mutex_enter(&spa_namespace_lock);
mutex_enter(&l2arc_dev_mtx);
/* if there are no vdevs, there is nothing to do */
if (l2arc_ndev == 0)
goto out;
first = NULL;
next = l2arc_dev_last;
do {
/* loop around the list looking for a non-faulted vdev */
if (next == NULL) {
next = list_head(l2arc_dev_list);
} else {
next = list_next(l2arc_dev_list, next);
if (next == NULL)
next = list_head(l2arc_dev_list);
}
/* if we have come back to the start, bail out */
if (first == NULL)
first = next;
else if (next == first)
break;
} while (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all);
/* if we were unable to find any usable vdevs, return NULL */
if (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all)
next = NULL;
l2arc_dev_last = next;
out:
mutex_exit(&l2arc_dev_mtx);
/*
* Grab the config lock to prevent the 'next' device from being
* removed while we are writing to it.
*/
if (next != NULL)
spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
mutex_exit(&spa_namespace_lock);
return (next);
}
/*
* Free buffers that were tagged for destruction.
*/
static void
l2arc_do_free_on_write(void)
{
list_t *buflist;
l2arc_data_free_t *df, *df_prev;
mutex_enter(&l2arc_free_on_write_mtx);
buflist = l2arc_free_on_write;
for (df = list_tail(buflist); df; df = df_prev) {
df_prev = list_prev(buflist, df);
ASSERT3P(df->l2df_abd, !=, NULL);
abd_free(df->l2df_abd);
list_remove(buflist, df);
kmem_free(df, sizeof (l2arc_data_free_t));
}
mutex_exit(&l2arc_free_on_write_mtx);
}
/*
* A write to a cache device has completed. Update all headers to allow
* reads from these buffers to begin.
*/
static void
l2arc_write_done(zio_t *zio)
{
l2arc_write_callback_t *cb;
l2arc_lb_abd_buf_t *abd_buf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
l2arc_dev_t *dev;
l2arc_dev_hdr_phys_t *l2dhdr;
list_t *buflist;
arc_buf_hdr_t *head, *hdr, *hdr_prev;
kmutex_t *hash_lock;
int64_t bytes_dropped = 0;
cb = zio->io_private;
ASSERT3P(cb, !=, NULL);
dev = cb->l2wcb_dev;
l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev, !=, NULL);
head = cb->l2wcb_head;
ASSERT3P(head, !=, NULL);
buflist = &dev->l2ad_buflist;
ASSERT3P(buflist, !=, NULL);
DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
l2arc_write_callback_t *, cb);
/*
* All writes completed, or an error was hit.
*/
top:
mutex_enter(&dev->l2ad_mtx);
for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. We must retry so we
* don't leave the ARC_FLAG_L2_WRITING bit set.
*/
ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
/*
* We don't want to rescan the headers we've
* already marked as having been written out, so
* we reinsert the head node so we can pick up
* where we left off.
*/
list_remove(buflist, head);
list_insert_after(buflist, hdr, head);
mutex_exit(&dev->l2ad_mtx);
/*
* We wait for the hash lock to become available
* to try and prevent busy waiting, and increase
* the chance we'll be able to acquire the lock
* the next time around.
*/
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* We could not have been moved into the arc_l2c_only
* state while in-flight due to our ARC_FLAG_L2_WRITING
* bit being set. Let's just ensure that's being enforced.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Skipped - drop L2ARC entry and mark the header as no
* longer L2 eligible.
*/
if (zio->io_error != 0) {
/*
* Error - drop L2ARC entry.
*/
list_remove(buflist, hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
uint64_t psize = HDR_GET_PSIZE(hdr);
l2arc_hdr_arcstats_decrement(hdr);
bytes_dropped +=
vdev_psize_to_asize(dev->l2ad_vdev, psize);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
}
/*
* Allow ARC to begin reads and ghost list evictions to
* this L2ARC entry.
*/
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
}
/*
* Free the allocated abd buffers for writing the log blocks.
* If the zio failed reclaim the allocated space and remove the
* pointers to these log blocks from the log block pointer list
* of the L2ARC device.
*/
while ((abd_buf = list_remove_tail(&cb->l2wcb_abd_list)) != NULL) {
abd_free(abd_buf->abd);
zio_buf_free(abd_buf, sizeof (*abd_buf));
if (zio->io_error != 0) {
lb_ptr_buf = list_remove_head(&dev->l2ad_lbptr_list);
/*
* L2BLK_GET_PSIZE returns aligned size for log
* blocks.
*/
uint64_t asize =
L2BLK_GET_PSIZE((lb_ptr_buf->lb_ptr)->lbp_prop);
bytes_dropped += asize;
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
list_destroy(&cb->l2wcb_abd_list);
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_writes_error);
/*
* Restore the lbps array in the header to its previous state.
* If the list of log block pointers is empty, zero out the
* log block pointers in the device header.
*/
lb_ptr_buf = list_head(&dev->l2ad_lbptr_list);
for (int i = 0; i < 2; i++) {
if (lb_ptr_buf == NULL) {
/*
* If the list is empty zero out the device
* header. Otherwise zero out the second log
* block pointer in the header.
*/
if (i == 0) {
bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
} else {
bzero(&l2dhdr->dh_start_lbps[i],
sizeof (l2arc_log_blkptr_t));
}
break;
}
bcopy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[i],
sizeof (l2arc_log_blkptr_t));
lb_ptr_buf = list_next(&dev->l2ad_lbptr_list,
lb_ptr_buf);
}
}
ARCSTAT_BUMP(arcstat_l2_writes_done);
list_remove(buflist, head);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
mutex_exit(&dev->l2ad_mtx);
ASSERT(dev->l2ad_vdev != NULL);
vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
l2arc_do_free_on_write();
kmem_free(cb, sizeof (l2arc_write_callback_t));
}
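/*
* Undo the on-disk transforms on a buffer read back from the L2ARC:
* decrypt it if it was encrypted, and decompress it if the ARC is not
* keeping compressed data for this header, so the header ends up holding
* what the ARC expects.
*/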
static int
l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
{
int ret;
spa_t *spa = zio->io_spa;
arc_buf_hdr_t *hdr = cb->l2rcb_hdr;
blkptr_t *bp = zio->io_bp;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/*
* ZIL data is never written to the L2ARC, so we don't need
* special handling for its unique MAC storage.
*/
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* If the data was encrypted, decrypt it now. Note that
* we must check the bp here and not the hdr, since the
* hdr does not have its encryption parameters updated
* until arc_read_done().
*/
if (BP_IS_ENCRYPTED(bp)) {
abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
B_TRUE);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, HDR_GET_PSIZE(hdr), eabd,
hdr->b_l1hdr.b_pabd, &no_crypt);
if (ret != 0) {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
goto error;
}
/*
* If we actually performed decryption, replace b_pabd
* with the decrypted data. Otherwise we can just throw
* our decryption buffer away.
*/
if (!no_crypt) {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = eabd;
zio->io_abd = eabd;
} else {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
}
}
/*
* If the L2ARC block was compressed, but ARC compression
* is disabled we decompress the data into a new buffer and
* replace the existing data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
B_TRUE);
void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr);
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
zio->io_abd = cabd;
zio->io_size = HDR_GET_LSIZE(hdr);
}
return (0);
error:
return (ret);
}
/*
* A read to a cache device completed. Validate buffer contents before
* handing over to the regular ARC routines.
*/
static void
l2arc_read_done(zio_t *zio)
{
int tfm_error = 0;
l2arc_read_callback_t *cb = zio->io_private;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
boolean_t valid_cksum;
boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) &&
(cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT));
ASSERT3P(zio->io_vd, !=, NULL);
ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
ASSERT3P(cb, !=, NULL);
hdr = cb->l2rcb_hdr;
ASSERT3P(hdr, !=, NULL);
hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
/*
* If the data was read into a temporary buffer,
* move it and free the buffer.
*/
if (cb->l2rcb_abd != NULL) {
ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
if (zio->io_error == 0) {
if (using_rdata) {
abd_copy(hdr->b_crypt_hdr.b_rabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
} else {
abd_copy(hdr->b_l1hdr.b_pabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
}
}
/*
* The following must be done regardless of whether
* there was an error:
* - free the temporary buffer
* - point zio to the real ARC buffer
* - set zio size accordingly
* These are required because the zio is either re-used to
* read the block in the case of an error, or passed to
* arc_read_done(), which needs real data.
*/
abd_free(cb->l2rcb_abd);
zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
if (using_rdata) {
ASSERT(HDR_HAS_RABD(hdr));
zio->io_abd = zio->io_orig_abd =
hdr->b_crypt_hdr.b_rabd;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
}
}
ASSERT3P(zio->io_abd, !=, NULL);
/*
* Check this survived the L2ARC journey.
*/
ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd ||
(HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd));
zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
zio->io_prop.zp_complevel = hdr->b_complevel;
valid_cksum = arc_cksum_is_equal(hdr, zio);
/*
* b_rabd will always match the data as it exists on disk if it is
* being used. Therefore if we are reading into b_rabd we do not
* attempt to untransform the data.
*/
if (valid_cksum && !using_rdata)
tfm_error = l2arc_untransform(zio, cb);
if (valid_cksum && tfm_error == 0 && zio->io_error == 0 &&
!HDR_L2_EVICTED(hdr)) {
mutex_exit(hash_lock);
zio->io_private = hdr;
arc_read_done(zio);
} else {
/*
* Buffer didn't survive caching. Increment stats and
* reissue to the original storage device.
*/
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_io_error);
} else {
zio->io_error = SET_ERROR(EIO);
}
if (!valid_cksum || tfm_error != 0)
ARCSTAT_BUMP(arcstat_l2_cksum_bad);
/*
* If there's no waiter, issue an async i/o to the primary
* storage now. If there *is* a waiter, the caller must
* issue the i/o in a context where it's OK to block.
*/
if (zio->io_waiter == NULL) {
zio_t *pio = zio_unique_parent(zio);
void *abd = (using_rdata) ?
hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd;
ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
zio = zio_read(pio, zio->io_spa, zio->io_bp,
abd, zio->io_size, arc_read_done,
hdr, zio->io_priority, cb->l2rcb_flags,
&cb->l2rcb_zb);
/*
* Original ZIO will be freed, so we need to update
* ARC header with the new ZIO pointer to be used
* by zio_change_priority() in arc_read().
*/
for (struct arc_callback *acb = hdr->b_l1hdr.b_acb;
acb != NULL; acb = acb->acb_next)
acb->acb_zio_head = zio;
mutex_exit(hash_lock);
zio_nowait(zio);
} else {
mutex_exit(hash_lock);
}
}
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* This is the list priority from which the L2ARC will search for pages to
* cache. This is used within loops (0..3) to cycle through lists in the
* desired order. This order can have a significant effect on cache
* performance.
*
* Currently the metadata lists are hit first, MFU then MRU, followed by
* the data lists. This function returns a locked list, and also returns
* the lock pointer.
*/
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
multilist_t *ml = NULL;
unsigned int idx;
ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES);
switch (list_num) {
case 0:
ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
break;
case 1:
ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
break;
case 2:
ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
break;
case 3:
ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
break;
default:
return (NULL);
}
/*
* Return a randomly-selected sublist. This is acceptable
* because the caller feeds only a little bit of data for each
* call (8MB). Subsequent calls will result in different
* sublists being selected.
*/
idx = multilist_get_random_index(ml);
return (multilist_sublist_lock(ml, idx));
}
/*
* Calculates the maximum overhead of L2ARC metadata log blocks for a given
* L2ARC write size. l2arc_evict and l2arc_write_size need to include this
* overhead in processing to make sure there is enough headroom available
* when writing buffers.
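*
* Illustrative example (numbers assumed): an 8 MiB write is counted as at
* most 16384 512-byte blocks; with l2ad_log_entries at 1022 that is
* ceil(16384 / 1022) = 17 log blocks charged as overhead.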
*/
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev)
{
if (dev->l2ad_log_entries == 0) {
return (0);
} else {
uint64_t log_entries = write_sz >> SPA_MINBLOCKSHIFT;
uint64_t log_blocks = (log_entries +
dev->l2ad_log_entries - 1) /
dev->l2ad_log_entries;
return (vdev_psize_to_asize(dev->l2ad_vdev,
sizeof (l2arc_log_blk_phys_t)) * log_blocks);
}
}
/*
* Evict buffers from the device write hand to the distance specified in
* bytes. This distance may span populated buffers, it may span nothing.
* This is clearing a region on the L2ARC device ready for writing.
* If the 'all' boolean is set, every buffer is evicted.
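*
* Note that the distance passed in is grown by the worst-case log block
* overhead (and, when TRIM-ahead is enabled, by the trim-ahead span)
* before any eviction takes place.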
*/
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
list_t *buflist;
arc_buf_hdr_t *hdr, *hdr_prev;
kmutex_t *hash_lock;
uint64_t taddr;
l2arc_lb_ptr_buf_t *lb_ptr_buf, *lb_ptr_buf_prev;
vdev_t *vd = dev->l2ad_vdev;
boolean_t rerun;
buflist = &dev->l2ad_buflist;
/*
* We need to add in the worst case scenario of log block overhead.
*/
distance += l2arc_log_blk_overhead(distance, dev);
if (vd->vdev_has_trim && l2arc_trim_ahead > 0) {
/*
* Trim ahead of the write size by 64MB or (l2arc_trim_ahead/100)
* times the write size, whichever is greater.
*/
distance += MAX(64 * 1024 * 1024,
(distance * l2arc_trim_ahead) / 100);
}
top:
rerun = B_FALSE;
if (dev->l2ad_hand >= (dev->l2ad_end - distance)) {
/*
* When there is no space to accommodate upcoming writes,
* evict to the end. Then bump the write and evict hands
* to the start and iterate. This iteration does not
* happen indefinitely as we make sure in
* l2arc_write_size() that when the write hand is reset,
* the write size does not exceed the end of the device.
*/
rerun = B_TRUE;
taddr = dev->l2ad_end;
} else {
taddr = dev->l2ad_hand + distance;
}
DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
uint64_t, taddr, boolean_t, all);
if (!all) {
/*
* This check has to be placed after deciding whether to
* iterate (rerun).
*/
if (dev->l2ad_first) {
/*
* This is the first sweep through the device. There is
* nothing to evict. We have already trimmed the
* whole device.
*/
goto out;
} else {
/*
* Trim the space to be evicted.
*/
if (vd->vdev_has_trim && dev->l2ad_evict < taddr &&
l2arc_trim_ahead > 0) {
/*
* We have to drop the spa_config lock because
* vdev_trim_range() will acquire it.
* l2ad_evict already accounts for the label
* size. To prevent vdev_trim_ranges() from
* adding it again, we subtract it from
* l2ad_evict.
*/
spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
vdev_trim_simple(vd,
dev->l2ad_evict - VDEV_LABEL_START_SIZE,
taddr - dev->l2ad_evict);
spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev,
RW_READER);
}
/*
* When rebuilding L2ARC we retrieve the evict hand
* from the header of the device. Of note, l2arc_evict()
* does not actually delete buffers from the cache
* device, but trimming may do so depending on the
* hardware implementation. Thus keeping track of the
* evict hand is useful.
*/
dev->l2ad_evict = MAX(dev->l2ad_evict, taddr);
}
}
retry:
mutex_enter(&dev->l2ad_mtx);
/*
* We have to account for evicted log blocks. Run vdev_space_update()
* on log blocks whose offset (in bytes) is before the evicted offset
* (in bytes) by searching in the list of pointers to log blocks
* present in the L2ARC device.
*/
for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf;
lb_ptr_buf = lb_ptr_buf_prev) {
lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf);
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE(
(lb_ptr_buf->lb_ptr)->lbp_prop);
/*
* We don't worry about log blocks left behind (i.e.
* lbp_payload_start < l2ad_hand) because l2arc_write_buffers()
* will never write more than l2arc_evict() evicts.
*/
if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) {
break;
} else {
vdev_space_update(vd, -asize, 0, 0);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
ASSERT(!HDR_EMPTY(hdr));
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
*/
ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
mutex_exit(&dev->l2ad_mtx);
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto retry;
}
/*
* A header can't be on this list if it doesn't have an L2 header.
*/
ASSERT(HDR_HAS_L2HDR(hdr));
/* Ensure this header has finished being written. */
ASSERT(!HDR_L2_WRITING(hdr));
ASSERT(!HDR_L2_WRITE_HEAD(hdr));
if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict ||
hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
/*
* We've evicted to the target address,
* or the end of the device.
*/
mutex_exit(hash_lock);
break;
}
if (!HDR_HAS_L1HDR(hdr)) {
ASSERT(!HDR_L2_READING(hdr));
/*
* This doesn't exist in the ARC. Destroy.
* arc_hdr_destroy() will call list_remove()
* and decrement arcstat_l2_lsize.
*/
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
} else {
ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
/*
* Invalidate issued or about to be issued
* reads, since we may be about to write
* over this location.
*/
if (HDR_L2_READING(hdr)) {
ARCSTAT_BUMP(arcstat_l2_evict_reading);
arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
}
arc_hdr_l2hdr_destroy(hdr);
}
mutex_exit(hash_lock);
}
mutex_exit(&dev->l2ad_mtx);
out:
/*
* We need to check if we evict all buffers, otherwise we may iterate
* unnecessarily.
*/
if (!all && rerun) {
/*
* Bump device hand to the device start if it is approaching the
* end. l2arc_evict() has already evicted ahead for this case.
*/
dev->l2ad_hand = dev->l2ad_start;
dev->l2ad_evict = dev->l2ad_start;
dev->l2ad_first = B_FALSE;
goto top;
}
if (!all) {
/*
* In case of cache device removal (all) the following
* assertions may be violated without functional consequences
* as the device is about to be removed.
*/
ASSERT3U(dev->l2ad_hand + distance, <, dev->l2ad_end);
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <, dev->l2ad_evict);
}
}
/*
* Handle any abd transforms that might be required for writing to the L2ARC.
* If successful, this function will always return an abd with the data
* transformed as it is on disk in a new abd of asize bytes.
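*
* Compression is applied first and encryption second, mirroring the normal
* write pipeline, so the resulting abd matches the on-disk byte stream.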
*/
static int
l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
abd_t **abd_out)
{
int ret;
void *tmp = NULL;
abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd;
enum zio_compress compress = HDR_GET_COMPRESS(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t size = arc_hdr_size(hdr);
boolean_t ismd = HDR_ISTYPE_METADATA(hdr);
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
dsl_crypto_key_t *dck = NULL;
uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
boolean_t no_crypt = B_FALSE;
ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) ||
HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize);
ASSERT3U(psize, <=, asize);
/*
* If this data simply needs its own buffer, we allocate it and copy
* the data. This may be done to eliminate a dependency on a
* shared buffer or to reallocate the buffer to match asize.
*/
if (HDR_HAS_RABD(hdr) && asize != psize) {
ASSERT3U(asize, >=, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize);
if (psize != asize)
abd_zero_off(to_write, psize, asize - psize);
goto out;
}
if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
!HDR_ENCRYPTED(hdr)) {
ASSERT3U(size, ==, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (size != asize)
abd_zero_off(to_write, size, asize - size);
goto out;
}
if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) {
cabd = abd_alloc_for_io(asize, ismd);
tmp = abd_borrow_buf(cabd, asize);
psize = zio_compress_data(compress, to_write, tmp, size,
hdr->b_complevel);
if (psize >= size) {
abd_return_buf(cabd, tmp, asize);
HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
to_write = cabd;
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (size != asize)
abd_zero_off(to_write, size, asize - size);
goto encrypt;
}
ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
if (psize < asize)
bzero((char *)tmp + psize, asize - psize);
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, asize);
to_write = cabd;
}
encrypt:
if (HDR_ENCRYPTED(hdr)) {
eabd = abd_alloc_for_io(asize, ismd);
/*
* If the dataset was disowned before the buffer
* made it to this point, the key to re-encrypt
* it won't be available. In this case we simply
* won't write the buffer to the L2ARC.
*/
ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj,
FTAG, &dck);
if (ret != 0)
goto error;
ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd,
&no_crypt);
if (ret != 0)
goto error;
if (no_crypt)
abd_copy(eabd, to_write, psize);
if (psize != asize)
abd_zero_off(eabd, psize, asize - psize);
/* assert that the MAC we got here matches the one we saved */
ASSERT0(bcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (to_write == cabd)
abd_free(cabd);
to_write = eabd;
}
out:
ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd);
*abd_out = to_write;
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (cabd != NULL)
abd_free(cabd);
if (eabd != NULL)
abd_free(eabd);
*abd_out = NULL;
return (ret);
}
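/*
* Completion callback for log block fetch I/Os issued by
* l2arc_log_blk_fetch(); frees the temporary abd and the callback.
*/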
static void
l2arc_blk_fetch_done(zio_t *zio)
{
l2arc_read_callback_t *cb;
cb = zio->io_private;
if (cb->l2rcb_abd != NULL)
abd_free(cb->l2rcb_abd);
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* Find and write ARC buffers to the L2ARC device.
*
* An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
* for reading until they have completed writing.
* The l2arc_headroom_boost tunable scales the scan headroom when compressed
* ARC is enabled.
*
* Returns the number of bytes actually written (which may be smaller than
* the delta by which the device hand has changed due to alignment and the
* writing of log blocks).
*/
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
arc_buf_hdr_t *hdr, *hdr_prev, *head;
uint64_t write_asize, write_psize, write_lsize, headroom;
boolean_t full;
l2arc_write_callback_t *cb = NULL;
zio_t *pio, *wzio;
uint64_t guid = spa_load_guid(spa);
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev->l2ad_vdev, !=, NULL);
pio = NULL;
write_lsize = write_asize = write_psize = 0;
full = B_FALSE;
head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
/*
* Copy buffers for L2ARC writing.
*/
for (int pass = 0; pass < L2ARC_FEED_TYPES; pass++) {
/*
* Passes 1 and 3 scan MRU metadata and data respectively;
* skip them when caching only MFU content.
*/
if (l2arc_mfuonly) {
if (pass == 1 || pass == 3)
continue;
}
multilist_sublist_t *mls = l2arc_sublist_lock(pass);
uint64_t passed_sz = 0;
VERIFY3P(mls, !=, NULL);
/*
* L2ARC fast warmup.
*
* Until the ARC is warm and starts to evict, read from the
* head of the ARC lists rather than the tail.
*/
if (arc_warm == B_FALSE)
hdr = multilist_sublist_head(mls);
else
hdr = multilist_sublist_tail(mls);
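/*
* Limit how far we scan into each sublist: up to l2arc_headroom
* times the write size, scaled by l2arc_headroom_boost (a percent)
* when compressed ARC is enabled.
*/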
headroom = target_sz * l2arc_headroom;
if (zfs_compressed_arc_enabled)
headroom = (headroom * l2arc_headroom_boost) / 100;
for (; hdr; hdr = hdr_prev) {
kmutex_t *hash_lock;
abd_t *to_write = NULL;
if (arc_warm == B_FALSE)
hdr_prev = multilist_sublist_next(mls, hdr);
else
hdr_prev = multilist_sublist_prev(mls, hdr);
hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
/*
* Skip this buffer rather than waiting.
*/
continue;
}
passed_sz += HDR_GET_LSIZE(hdr);
if (l2arc_headroom != 0 && passed_sz > headroom) {
/*
* Searched too far.
*/
mutex_exit(hash_lock);
break;
}
if (!l2arc_write_eligible(guid, hdr)) {
mutex_exit(hash_lock);
continue;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT3U(arc_hdr_size(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
psize);
if ((write_asize + asize) > target_sz) {
full = B_TRUE;
mutex_exit(hash_lock);
break;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
ASSERT3U(arc_hdr_size(hdr), >, 0);
/*
* If this header has b_rabd, we can use this since it
* must always match the data exactly as it exists on
* disk. Otherwise, the L2ARC can normally use the
* hdr's data, but if we're sharing data between the
* hdr and one of its bufs, L2ARC needs its own copy of
* the data so that the ZIO below can't race with the
* buf consumer. To ensure that this copy will be
* available for the lifetime of the ZIO and be cleaned
* up afterwards, we add it to the l2arc_free_on_write
* queue. If we need to apply any transforms to the
* data (compression, encryption) we will also need the
* extra buffer.
*/
if (HDR_HAS_RABD(hdr) && psize == asize) {
to_write = hdr->b_crypt_hdr.b_rabd;
} else if ((HDR_COMPRESSION_ENABLED(hdr) ||
HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
!HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
psize == asize) {
to_write = hdr->b_l1hdr.b_pabd;
} else {
int ret;
arc_buf_contents_t type = arc_buf_type(hdr);
ret = l2arc_apply_transforms(spa, hdr, asize,
&to_write);
if (ret != 0) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
continue;
}
l2arc_free_abd_on_write(to_write, asize, type);
}
if (pio == NULL) {
/*
* Insert a dummy header on the buflist so
* l2arc_write_done() can find where the
* write buffers begin without searching.
*/
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, head);
mutex_exit(&dev->l2ad_mtx);
cb = kmem_alloc(
sizeof (l2arc_write_callback_t), KM_SLEEP);
cb->l2wcb_dev = dev;
cb->l2wcb_head = head;
/*
* Create a list to save allocated abd buffers
* for l2arc_log_blk_commit().
*/
list_create(&cb->l2wcb_abd_list,
sizeof (l2arc_lb_abd_buf_t),
offsetof(l2arc_lb_abd_buf_t, node));
pio = zio_root(spa, l2arc_write_done, cb,
ZIO_FLAG_CANFAIL);
}
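/*
* Record where this buffer will live on the cache device and make
* it visible on the device's buflist before issuing the write.
*/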
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_hits = 0;
hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
hdr->b_l2hdr.b_arcs_state =
hdr->b_l1hdr.b_state->arcs_state;
arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR);
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
wzio = zio_write_phys(pio, dev->l2ad_vdev,
hdr->b_l2hdr.b_daddr, asize, to_write,
ZIO_CHECKSUM_OFF, NULL, hdr,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_CANFAIL, B_FALSE);
write_lsize += HDR_GET_LSIZE(hdr);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
zio_t *, wzio);
write_psize += psize;
write_asize += asize;
dev->l2ad_hand += asize;
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_exit(hash_lock);
/*
* Append buf info to current log and commit if full.
* arcstat_l2_{size,asize} kstats are updated
* internally.
*/
if (l2arc_log_blk_insert(dev, hdr))
l2arc_log_blk_commit(dev, pio, cb);
zio_nowait(wzio);
}
multilist_sublist_unlock(mls);
if (full == B_TRUE)
break;
}
/* No buffers selected for writing? */
if (pio == NULL) {
ASSERT0(write_lsize);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
/*
* Although we did not write any buffers, l2ad_evict may
* have advanced.
*/
if (dev->l2ad_evict != l2dhdr->dh_evict)
l2arc_dev_hdr_update(dev);
return (0);
}
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
ASSERT3U(write_asize, <=, target_sz);
ARCSTAT_BUMP(arcstat_l2_writes_sent);
ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
dev->l2ad_writing = B_TRUE;
(void) zio_wait(pio);
dev->l2ad_writing = B_FALSE;
/*
* Update the device header after the zio completes as
* l2arc_write_done() may have updated the memory holding the log block
* pointers in the device header.
*/
l2arc_dev_hdr_update(dev);
return (write_asize);
}
static boolean_t
l2arc_hdr_limit_reached(void)
{
int64_t s = aggsum_upper_bound(&arc_sums.arcstat_l2_hdr_size);
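/*
* Back off when the system needs memory reclaimed, or when L2ARC-only
* headers exceed 3/4 of the ARC metadata limit or l2arc_meta_percent
* of the current (or maximum) ARC size.
*/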
return (arc_reclaim_needed() || (s > arc_meta_limit * 3 / 4) ||
(s > (arc_warm ? arc_c : arc_c_max) * l2arc_meta_percent / 100));
}
/*
* This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC.
*/
/* ARGSUSED */
static void
l2arc_feed_thread(void *unused)
{
callb_cpr_t cpr;
l2arc_dev_t *dev;
spa_t *spa;
uint64_t size, wrote;
clock_t begin, next = ddi_get_lbolt();
fstrans_cookie_t cookie;
CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
mutex_enter(&l2arc_feed_thr_lock);
cookie = spl_fstrans_mark();
while (l2arc_thread_exit == 0) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle(&l2arc_feed_thr_cv,
&l2arc_feed_thr_lock, next);
CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
next = ddi_get_lbolt() + hz;
/*
* Quick check for L2ARC devices.
*/
mutex_enter(&l2arc_dev_mtx);
if (l2arc_ndev == 0) {
mutex_exit(&l2arc_dev_mtx);
continue;
}
mutex_exit(&l2arc_dev_mtx);
begin = ddi_get_lbolt();
/*
* This selects the next l2arc device to write to, and in
* doing so the next spa to feed from: dev->l2ad_spa. This
* will return NULL if there are now no l2arc devices or if
* they are all faulted.
*
* If a device is returned, its spa's config lock is also
* held to prevent device removal. l2arc_dev_get_next()
* will grab and release l2arc_dev_mtx.
*/
if ((dev = l2arc_dev_get_next()) == NULL)
continue;
spa = dev->l2ad_spa;
ASSERT3P(spa, !=, NULL);
/*
* If the pool is read-only then force the feed thread to
* sleep a little longer.
*/
if (!spa_writeable(spa)) {
next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
/*
* Avoid contributing to memory pressure.
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
ARCSTAT_BUMP(arcstat_l2_feeds);
size = l2arc_write_size(dev);
/*
* Evict L2ARC buffers that will be overwritten.
*/
l2arc_evict(dev, size, B_FALSE);
/*
* Write ARC buffers.
*/
wrote = l2arc_write_buffers(spa, dev, size);
/*
* Calculate interval between writes.
*/
next = l2arc_write_interval(begin, size, wrote);
spa_config_exit(spa, SCL_L2ARC, dev);
}
spl_fstrans_unmark(cookie);
l2arc_thread_exit = 0;
cv_broadcast(&l2arc_feed_thr_cv);
CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
return (l2arc_vdev_get(vd) != NULL);
}
/*
* Returns the l2arc_dev_t associated with a particular vdev_t or NULL if
* the vdev_t isn't an L2ARC device.
*/
l2arc_dev_t *
l2arc_vdev_get(vdev_t *vd)
{
l2arc_dev_t *dev;
mutex_enter(&l2arc_dev_mtx);
for (dev = list_head(l2arc_dev_list); dev != NULL;
dev = list_next(l2arc_dev_list, dev)) {
if (dev->l2ad_vdev == vd)
break;
}
mutex_exit(&l2arc_dev_mtx);
return (dev);
}
+static void
+l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen)
+{
+ l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
+ uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
+ spa_t *spa = dev->l2ad_spa;
+
+ /*
+ * The L2ARC has to hold at least the payload of one log block for
+ * them to be restored (persistent L2ARC). The payload of a log block
+ * depends on the number of its log entries. We always write log blocks
+ * with 1022 entries. How many of them are committed or restored depends
+ * on the size of the L2ARC device. Thus the maximum payload of
+ * one log block is 1022 * SPA_MAXBLOCKSIZE = 16GB. If the L2ARC device
+ * is less than that, we reduce the amount of committed and restored
+ * log entries per block so as to enable persistence.
+ */
+ if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) {
+ dev->l2ad_log_entries = 0;
+ } else {
+ dev->l2ad_log_entries = MIN((dev->l2ad_end -
+ dev->l2ad_start) >> SPA_MAXBLOCKSHIFT,
+ L2ARC_LOG_BLK_MAX_ENTRIES);
+ }
+
+ /*
+ * Read the device header; if an error is returned, do not rebuild L2ARC.
+ */
+ if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) {
+ /*
+ * If we are onlining a cache device (vdev_reopen) that was
+ * still present (l2arc_vdev_present()) and rebuild is enabled,
+ * we should evict all ARC buffers and pointers to log blocks
+ * and reclaim their space before restoring its contents to
+ * L2ARC.
+ */
+ if (reopen) {
+ if (!l2arc_rebuild_enabled) {
+ return;
+ } else {
+ l2arc_evict(dev, 0, B_TRUE);
+ /* start a new log block */
+ dev->l2ad_log_ent_idx = 0;
+ dev->l2ad_log_blk_payload_asize = 0;
+ dev->l2ad_log_blk_payload_start = 0;
+ }
+ }
+ /*
+ * Just mark the device as pending for a rebuild. We won't
+ * be starting a rebuild in line here as it would block pool
+ * import. Instead spa_load_impl will hand that off to an
+ * async task which will call l2arc_spa_rebuild_start.
+ */
+ dev->l2ad_rebuild = B_TRUE;
+ } else if (spa_writeable(spa)) {
+ /*
+ * In this case TRIM the whole device if l2arc_trim_ahead > 0,
+ * otherwise create a new header. We zero out the memory holding
+ * the header to reset dh_start_lbps. If we TRIM the whole
+ * device the new header will be written by
+ * vdev_trim_l2arc_thread() at the end of the TRIM to update the
+ * trim_state in the header too. When reading the header, if
+ * trim_state is not VDEV_TRIM_COMPLETE and l2arc_trim_ahead > 0
+ * we opt to TRIM the whole device again.
+ */
+ if (l2arc_trim_ahead > 0) {
+ dev->l2ad_trim_all = B_TRUE;
+ } else {
+ bzero(l2dhdr, l2dhdr_asize);
+ l2arc_dev_hdr_update(dev);
+ }
+ }
+}
+
/*
* Add a vdev for use by the L2ARC. By this point the spa has already
* validated the vdev and opened it.
*/
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
l2arc_dev_t *adddev;
uint64_t l2dhdr_asize;
ASSERT(!l2arc_vdev_present(vd));
/*
* Create a new l2arc device entry.
*/
adddev = vmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
adddev->l2ad_spa = spa;
adddev->l2ad_vdev = vd;
/* leave extra size for an l2arc device header */
l2dhdr_asize = adddev->l2ad_dev_hdr_asize =
MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift);
adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize;
adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end);
adddev->l2ad_hand = adddev->l2ad_start;
adddev->l2ad_evict = adddev->l2ad_start;
adddev->l2ad_first = B_TRUE;
adddev->l2ad_writing = B_FALSE;
adddev->l2ad_trim_all = B_FALSE;
list_link_init(&adddev->l2ad_node);
adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP);
mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
/*
* This is a list of all ARC buffers that are still valid on the
* device.
*/
list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
/*
* This is a list of pointers to log blocks that are still present
* on the device.
*/
list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t),
offsetof(l2arc_lb_ptr_buf_t, node));
vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
zfs_refcount_create(&adddev->l2ad_alloc);
zfs_refcount_create(&adddev->l2ad_lb_asize);
zfs_refcount_create(&adddev->l2ad_lb_count);
+ /*
+ * Decide if dev is eligible for L2ARC rebuild or whole device
+ * trimming. This has to happen before the device is added in the
+ * cache device list and l2arc_dev_mtx is released. Otherwise
+ * l2arc_feed_thread() might already start writing on the
+ * device.
+ */
+ l2arc_rebuild_dev(adddev, B_FALSE);
+
/*
* Add device to global list
*/
mutex_enter(&l2arc_dev_mtx);
list_insert_head(l2arc_dev_list, adddev);
atomic_inc_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
-
- /*
- * Decide if vdev is eligible for L2ARC rebuild
- */
- l2arc_rebuild_vdev(adddev->l2ad_vdev, B_FALSE);
}
+/*
+ * Decide if a vdev is eligible for L2ARC rebuild, called from vdev_reopen()
+ * in case of onlining a cache device.
+ */
void
l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen)
{
l2arc_dev_t *dev = NULL;
- l2arc_dev_hdr_phys_t *l2dhdr;
- uint64_t l2dhdr_asize;
- spa_t *spa;
dev = l2arc_vdev_get(vd);
ASSERT3P(dev, !=, NULL);
- spa = dev->l2ad_spa;
- l2dhdr = dev->l2ad_dev_hdr;
- l2dhdr_asize = dev->l2ad_dev_hdr_asize;
-
- /*
- * The L2ARC has to hold at least the payload of one log block for
- * them to be restored (persistent L2ARC). The payload of a log block
- * depends on the amount of its log entries. We always write log blocks
- * with 1022 entries. How many of them are committed or restored depends
- * on the size of the L2ARC device. Thus the maximum payload of
- * one log block is 1022 * SPA_MAXBLOCKSIZE = 16GB. If the L2ARC device
- * is less than that, we reduce the amount of committed and restored
- * log entries per block so as to enable persistence.
- */
- if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) {
- dev->l2ad_log_entries = 0;
- } else {
- dev->l2ad_log_entries = MIN((dev->l2ad_end -
- dev->l2ad_start) >> SPA_MAXBLOCKSHIFT,
- L2ARC_LOG_BLK_MAX_ENTRIES);
- }
/*
- * Read the device header, if an error is returned do not rebuild L2ARC.
+ * In contrast to l2arc_add_vdev() we do not have to worry about
+ * l2arc_feed_thread() invalidating previous content when onlining a
+ * cache device. The device parameters (l2ad*) are not cleared when
+ * offlining the device and writing new buffers will not invalidate
+ * all previous content. In the worst case only buffers that have not had
+ * their log block written to the device will be lost.
+ * When onlining the cache device (i.e. offline->online without exporting
+ * the pool in between) this happens:
+ * vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev()
+ * | |
+ * vdev_is_dead() = B_FALSE l2ad_rebuild = B_TRUE
+ * During the time where vdev_is_dead = B_FALSE and until l2ad_rebuild
+ * is set to B_TRUE we might write additional buffers to the device.
*/
- if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) {
- /*
- * If we are onlining a cache device (vdev_reopen) that was
- * still present (l2arc_vdev_present()) and rebuild is enabled,
- * we should evict all ARC buffers and pointers to log blocks
- * and reclaim their space before restoring its contents to
- * L2ARC.
- */
- if (reopen) {
- if (!l2arc_rebuild_enabled) {
- return;
- } else {
- l2arc_evict(dev, 0, B_TRUE);
- /* start a new log block */
- dev->l2ad_log_ent_idx = 0;
- dev->l2ad_log_blk_payload_asize = 0;
- dev->l2ad_log_blk_payload_start = 0;
- }
- }
- /*
- * Just mark the device as pending for a rebuild. We won't
- * be starting a rebuild in line here as it would block pool
- * import. Instead spa_load_impl will hand that off to an
- * async task which will call l2arc_spa_rebuild_start.
- */
- dev->l2ad_rebuild = B_TRUE;
- } else if (spa_writeable(spa)) {
- /*
- * In this case TRIM the whole device if l2arc_trim_ahead > 0,
- * otherwise create a new header. We zero out the memory holding
- * the header to reset dh_start_lbps. If we TRIM the whole
- * device the new header will be written by
- * vdev_trim_l2arc_thread() at the end of the TRIM to update the
- * trim_state in the header too. When reading the header, if
- * trim_state is not VDEV_TRIM_COMPLETE and l2arc_trim_ahead > 0
- * we opt to TRIM the whole device again.
- */
- if (l2arc_trim_ahead > 0) {
- dev->l2ad_trim_all = B_TRUE;
- } else {
- bzero(l2dhdr, l2dhdr_asize);
- l2arc_dev_hdr_update(dev);
- }
- }
+ l2arc_rebuild_dev(dev, reopen);
}
/*
* Remove a vdev from the L2ARC.
*/
void
l2arc_remove_vdev(vdev_t *vd)
{
l2arc_dev_t *remdev = NULL;
/*
* Find the device by vdev
*/
remdev = l2arc_vdev_get(vd);
ASSERT3P(remdev, !=, NULL);
/*
* Cancel any ongoing or scheduled rebuild.
*/
mutex_enter(&l2arc_rebuild_thr_lock);
if (remdev->l2ad_rebuild_began == B_TRUE) {
remdev->l2ad_rebuild_cancel = B_TRUE;
while (remdev->l2ad_rebuild == B_TRUE)
cv_wait(&l2arc_rebuild_thr_cv, &l2arc_rebuild_thr_lock);
}
mutex_exit(&l2arc_rebuild_thr_lock);
/*
* Remove device from global list
*/
mutex_enter(&l2arc_dev_mtx);
list_remove(l2arc_dev_list, remdev);
l2arc_dev_last = NULL; /* may have been invalidated */
atomic_dec_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
/*
* Clear all buflists and ARC references. L2ARC device flush.
*/
l2arc_evict(remdev, 0, B_TRUE);
list_destroy(&remdev->l2ad_buflist);
ASSERT(list_is_empty(&remdev->l2ad_lbptr_list));
list_destroy(&remdev->l2ad_lbptr_list);
mutex_destroy(&remdev->l2ad_mtx);
zfs_refcount_destroy(&remdev->l2ad_alloc);
zfs_refcount_destroy(&remdev->l2ad_lb_asize);
zfs_refcount_destroy(&remdev->l2ad_lb_count);
kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize);
vmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
l2arc_thread_exit = 0;
l2arc_ndev = 0;
mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_rebuild_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_rebuild_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
l2arc_dev_list = &L2ARC_dev_list;
l2arc_free_on_write = &L2ARC_free_on_write;
list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
offsetof(l2arc_dev_t, l2ad_node));
list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
mutex_destroy(&l2arc_feed_thr_lock);
cv_destroy(&l2arc_feed_thr_cv);
mutex_destroy(&l2arc_rebuild_thr_lock);
cv_destroy(&l2arc_rebuild_thr_cv);
mutex_destroy(&l2arc_dev_mtx);
mutex_destroy(&l2arc_free_on_write_mtx);
list_destroy(l2arc_dev_list);
list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
TS_RUN, defclsyspri);
}
void
l2arc_stop(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
mutex_enter(&l2arc_feed_thr_lock);
cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
l2arc_thread_exit = 1;
while (l2arc_thread_exit != 0)
cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
mutex_exit(&l2arc_feed_thr_lock);
}
/*
* Punches out rebuild threads for the L2ARC devices in a spa. This should
* be called after pool import from the spa async thread, since starting
* these threads directly from spa_import() will make them part of the
* "zpool import" context and delay process exit (and thus pool import).
*/
void
l2arc_spa_rebuild_start(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Locate the spa's l2arc devices and kick off rebuild threads.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
l2arc_dev_t *dev =
l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
if (dev == NULL) {
/* Don't attempt a rebuild if the vdev is UNAVAIL */
continue;
}
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild_began = B_TRUE;
(void) thread_create(NULL, 0, l2arc_dev_rebuild_thread,
dev, 0, &p0, TS_RUN, minclsyspri);
}
mutex_exit(&l2arc_rebuild_thr_lock);
}
}
/*
* Main entry point for L2ARC rebuilding.
*/
static void
l2arc_dev_rebuild_thread(void *arg)
{
l2arc_dev_t *dev = arg;
VERIFY(!dev->l2ad_rebuild_cancel);
VERIFY(dev->l2ad_rebuild);
(void) l2arc_rebuild(dev);
mutex_enter(&l2arc_rebuild_thr_lock);
dev->l2ad_rebuild_began = B_FALSE;
dev->l2ad_rebuild = B_FALSE;
mutex_exit(&l2arc_rebuild_thr_lock);
thread_exit();
}
/*
* This function implements the actual L2ARC metadata rebuild. It reads
* the log block chain and restores each block's contents to memory
* (reconstructing arc_buf_hdr_t's).
*
* Operation stops under any of the following conditions:
*
* 1) We reach the end of the log block chain.
* 2) We encounter *any* error condition (cksum errors, io errors)
*/
static int
l2arc_rebuild(l2arc_dev_t *dev)
{
vdev_t *vd = dev->l2ad_vdev;
spa_t *spa = vd->vdev_spa;
int err = 0;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
l2arc_log_blk_phys_t *this_lb, *next_lb;
zio_t *this_io = NULL, *next_io = NULL;
l2arc_log_blkptr_t lbps[2];
l2arc_lb_ptr_buf_t *lb_ptr_buf;
boolean_t lock_held;
this_lb = vmem_zalloc(sizeof (*this_lb), KM_SLEEP);
next_lb = vmem_zalloc(sizeof (*next_lb), KM_SLEEP);
/*
* We prevent device removal while issuing reads to the device,
* then during the rebuilding phases we drop this lock again so
* that a spa_unload or device remove can be initiated - this is
* safe, because the spa will signal us to stop before removing
* our device and wait for us to stop.
*/
spa_config_enter(spa, SCL_L2ARC, vd, RW_READER);
lock_held = B_TRUE;
/*
* Retrieve the persistent L2ARC device state.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start);
dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop),
dev->l2ad_start);
dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time;
vd->vdev_trim_state = l2dhdr->dh_trim_state;
/*
* In case the zfs module parameter l2arc_rebuild_enabled is false
* we do not start the rebuild process.
*/
if (!l2arc_rebuild_enabled)
goto out;
/* Prepare the rebuild process */
bcopy(l2dhdr->dh_start_lbps, lbps, sizeof (lbps));
/* Start the rebuild process */
for (;;) {
if (!l2arc_log_blkptr_valid(dev, &lbps[0]))
break;
if ((err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
this_lb, next_lb, this_io, &next_io)) != 0)
goto out;
/*
* Our memory pressure valve. If the system is running low
* on memory, rather than swamping memory with new ARC buf
* hdrs, we opt not to rebuild the L2ARC. At this point,
* however, we have already set up our L2ARC dev to chain in
* new metadata log blocks, so the user may choose to offline/
* online the L2ARC dev at a later time (or re-import the pool)
* to reconstruct it (when there's less memory pressure).
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem);
cmn_err(CE_NOTE, "System running low on memory, "
"aborting L2ARC rebuild.");
err = SET_ERROR(ENOMEM);
goto out;
}
spa_config_exit(spa, SCL_L2ARC, vd);
lock_held = B_FALSE;
/*
* Now that we know that the next_lb checks out alright, we
* can start reconstruction from this log block.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
l2arc_log_blk_restore(dev, this_lb, asize);
/*
* log block restored, include its pointer in the list of
* pointers to log blocks present in the L2ARC device.
*/
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t),
KM_SLEEP);
bcopy(&lbps[0], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(vd, asize, 0, 0);
/*
* Protection against loops of log blocks:
*
* l2ad_hand l2ad_evict
* V V
* l2ad_start |=======================================| l2ad_end
* -----|||----|||---|||----|||
* (3) (2) (1) (0)
* ---|||---|||----|||---|||
* (7) (6) (5) (4)
*
* In this situation the pointer of log block (4) passes
* l2arc_log_blkptr_valid() but the log block should not be
* restored as it is overwritten by the payload of log block
* (0). Only log blocks (0)-(3) should be restored. We check
* whether l2ad_evict lies in between the payload starting
* offset of the next log block (lbps[1].lbp_payload_start)
* and the payload starting offset of the present log block
* (lbps[0].lbp_payload_start). If true and this isn't the
* first pass, we are looping from the beginning and we should
* stop.
*/
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev->l2ad_evict) &&
!dev->l2ad_first)
goto out;
cond_resched();
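/*
* Re-acquire the config lock before reading the next log block,
* checking for cancellation while we wait.
*/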
for (;;) {
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild = B_FALSE;
cv_signal(&l2arc_rebuild_thr_cv);
mutex_exit(&l2arc_rebuild_thr_lock);
err = SET_ERROR(ECANCELED);
goto out;
}
mutex_exit(&l2arc_rebuild_thr_lock);
if (spa_config_tryenter(spa, SCL_L2ARC, vd,
RW_READER)) {
lock_held = B_TRUE;
break;
}
/*
* L2ARC config lock held by somebody in writer,
* possibly due to them trying to remove us. They'll
* likely want us to shut down, so after a little
* delay, we check l2ad_rebuild_cancel and retry
* the lock again.
*/
delay(1);
}
/*
* Continue with the next log block.
*/
lbps[0] = lbps[1];
lbps[1] = this_lb->lb_prev_lbp;
PTR_SWAP(this_lb, next_lb);
this_io = next_io;
next_io = NULL;
}
if (this_io != NULL)
l2arc_log_blk_fetch_abort(this_io);
out:
if (next_io != NULL)
l2arc_log_blk_fetch_abort(next_io);
vmem_free(this_lb, sizeof (*this_lb));
vmem_free(next_lb, sizeof (*next_lb));
if (!l2arc_rebuild_enabled) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"disabled");
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_success);
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"successful, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) {
/*
* No error but also nothing restored, meaning the lbps array
* in the device header points to invalid/non-present log
* blocks. Reset the header.
*/
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"no valid log blocks");
bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
} else if (err == ECANCELED) {
/*
* In case the rebuild was canceled do not log to spa history
* log as the pool may be in the process of being removed.
*/
zfs_dbgmsg("L2ARC rebuild aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err != 0) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
}
if (lock_held)
spa_config_exit(spa, SCL_L2ARC, vd);
return (err);
}
/*
* Attempts to read the device header on the provided L2ARC device and writes
* it into the device's in-core header (l2ad_dev_hdr). On success, this
* function returns 0, otherwise the appropriate error code is returned.
*/
static int
l2arc_dev_hdr_read(l2arc_dev_t *dev)
{
int err;
uint64_t guid;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
guid = spa_guid(dev->l2ad_vdev->vdev_spa);
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd,
ZIO_CHECKSUM_LABEL, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_SPECULATIVE, B_FALSE));
abd_free(abd);
if (err != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_dh_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
return (err);
}
if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(l2dhdr, sizeof (*l2dhdr));
if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC ||
l2dhdr->dh_spa_guid != guid ||
l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid ||
l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION ||
l2dhdr->dh_log_entries != dev->l2ad_log_entries ||
l2dhdr->dh_end != dev->l2ad_end ||
!l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end,
l2dhdr->dh_evict) ||
(l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE &&
l2arc_trim_ahead > 0)) {
/*
* Attempt to rebuild a device containing no actual dev hdr
* or containing a header from some other pool or from another
* version of persistent L2ARC.
*/
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_unsupported);
return (SET_ERROR(ENOTSUP));
}
return (0);
}
/*
* Reads L2ARC log blocks from storage and validates their contents.
*
* This function implements a simple fetcher to make sure that while
* we're processing one buffer the L2ARC is already fetching the next
* one in the chain.
*
* The arguments this_lbp and next_lbp point to the current and next log block
* address in the block chain. Similarly, this_lb and next_lb hold the
* l2arc_log_blk_phys_t's of the current and next L2ARC blk.
*
* The `this_io' and `next_io' arguments are used for block fetching.
* When issuing the first blk IO during rebuild, you should pass NULL for
* `this_io'. This function will then issue a sync IO to read the block and
* also issue an async IO to fetch the next block in the block chain. The
* fetched IO is returned in `next_io'. On subsequent calls to this
* function, pass the value returned in `next_io' from the previous call
* as `this_io' and a fresh `next_io' pointer to hold the next fetch IO.
* Prior to the call, you should initialize your `next_io' pointer to be
* NULL. If no fetch IO was issued, the pointer is left set at NULL.
*
* On success, this function returns 0, otherwise it returns an appropriate
* error code. On error the fetching IO is aborted and cleared before
* returning from this function. Therefore, if we return `success', the
* caller can assume that we have taken care of cleanup of fetch IOs.
*/
static int
l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lbp, const l2arc_log_blkptr_t *next_lbp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io)
{
int err = 0;
zio_cksum_t cksum;
abd_t *abd = NULL;
uint64_t asize;
ASSERT(this_lbp != NULL && next_lbp != NULL);
ASSERT(this_lb != NULL && next_lb != NULL);
ASSERT(next_io != NULL && *next_io == NULL);
ASSERT(l2arc_log_blkptr_valid(dev, this_lbp));
/*
* Check to see if we have issued the IO for this log block in a
* previous run. If not, this is the first call, so issue it now.
*/
if (this_io == NULL) {
this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp,
this_lb);
}
/*
* Peek to see if we can start issuing the next IO immediately.
*/
if (l2arc_log_blkptr_valid(dev, next_lbp)) {
/*
* Start issuing IO for the next log block early - this
* should help keep the L2ARC device busy while we
* decompress and restore this log block.
*/
*next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp,
next_lb);
}
/* Wait for the IO to read this log block to complete */
if ((err = zio_wait(this_io)) != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading log block, "
"offset: %llu, vdev guid: %llu", err,
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
goto cleanup;
}
/*
* Make sure the buffer checks out.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop);
fletcher_4_native(this_lb, asize, NULL, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_lb_errors);
zfs_dbgmsg("L2ARC log block cksum failed, offset: %llu, "
"vdev guid: %llu, l2ad_hand: %llu, l2ad_evict: %llu",
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid,
(u_longlong_t)dev->l2ad_hand,
(u_longlong_t)dev->l2ad_evict);
err = SET_ERROR(ECKSUM);
goto cleanup;
}
/* Now we can take our time decoding this buffer */
switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
case ZIO_COMPRESS_LZ4:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, this_lb, 0, asize);
if ((err = zio_decompress_data(
L2BLK_GET_COMPRESS((this_lbp)->lbp_prop),
abd, this_lb, asize, sizeof (*this_lb), NULL)) != 0) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
break;
default:
err = SET_ERROR(EINVAL);
goto cleanup;
}
if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(this_lb, sizeof (*this_lb));
if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
cleanup:
/* Abort an in-flight fetch I/O in case of error */
if (err != 0 && *next_io != NULL) {
l2arc_log_blk_fetch_abort(*next_io);
*next_io = NULL;
}
if (abd != NULL)
abd_free(abd);
return (err);
}
/*
* Restores the payload of a log block to ARC. This creates empty ARC hdr
* entries which only contain an l2arc hdr, essentially restoring the
* buffers to their L2ARC evicted state. This function also updates space
* usage on the L2ARC vdev to make sure it tracks restored buffers.
*/
static void
l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb,
uint64_t lb_asize)
{
uint64_t size = 0, asize = 0;
uint64_t log_entries = dev->l2ad_log_entries;
/*
* Usually arc_adapt() is called only for data, not headers, but
* since we may allocate a significant amount of memory here, let ARC
* grow its arc_c.
*/
arc_adapt(log_entries * HDR_L2ONLY_SIZE, arc_l2c_only);
for (int i = log_entries - 1; i >= 0; i--) {
/*
* Restore goes in the reverse temporal direction to preserve
* correct temporal ordering of buffers in the l2ad_buflist.
* l2arc_hdr_restore also does a list_insert_tail instead of
* list_insert_head on the l2ad_buflist:
*
* LIST l2ad_buflist LIST
* HEAD <------ (time) ------ TAIL
* direction +-----+-----+-----+-----+-----+ direction
* of l2arc <== | buf | buf | buf | buf | buf | ===> of rebuild
* fill +-----+-----+-----+-----+-----+
* ^ ^
* | |
* | |
* l2arc_feed_thread l2arc_rebuild
* will place new bufs here restores bufs here
*
* During l2arc_rebuild() the device is not used by
* l2arc_feed_thread() as dev->l2ad_rebuild is set to true.
*/
size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop);
asize += vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop));
l2arc_hdr_restore(&lb->lb_entries[i], dev);
}
/*
* Record rebuild stats:
* size Logical size of restored buffers in the L2ARC
* asize Aligned size of restored buffers in the L2ARC
*/
ARCSTAT_INCR(arcstat_l2_rebuild_size, size);
ARCSTAT_INCR(arcstat_l2_rebuild_asize, asize);
ARCSTAT_INCR(arcstat_l2_rebuild_bufs, log_entries);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, lb_asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, asize / lb_asize);
ARCSTAT_BUMP(arcstat_l2_rebuild_log_blks);
}
/*
* Restores a single ARC buf hdr from a log entry. The ARC buffer is put
* into a state indicating that it has been evicted to L2ARC.
*/
static void
l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
{
arc_buf_hdr_t *hdr, *exists;
kmutex_t *hash_lock;
arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop);
uint64_t asize;
/*
* Do all the allocation before grabbing any locks, this lets us
* sleep if memory is full and we don't have to deal with failed
* allocations.
*/
hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type,
dev, le->le_dva, le->le_daddr,
L2BLK_GET_PSIZE((le)->le_prop), le->le_birth,
L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel,
L2BLK_GET_PROTECTED((le)->le_prop),
L2BLK_GET_PREFETCH((le)->le_prop),
L2BLK_GET_STATE((le)->le_prop));
asize = vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((le)->le_prop));
/*
* vdev_space_update() has to be called before arc_hdr_destroy() to
* avoid underflow since the latter also calls vdev_space_update().
*/
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
mutex_exit(&dev->l2ad_mtx);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists) {
/* Buffer was already cached, no need to restore it. */
arc_hdr_destroy(hdr);
/*
* If the buffer is already cached, check whether it has
* L2ARC metadata. If not, enter it and update the flag.
* This is important in case of onlining a cache device, since
* we previously evicted all L2ARC metadata from ARC.
*/
if (!HDR_HAS_L2HDR(exists)) {
arc_hdr_set_flags(exists, ARC_FLAG_HAS_L2HDR);
exists->b_l2hdr.b_dev = dev;
exists->b_l2hdr.b_daddr = le->le_daddr;
exists->b_l2hdr.b_arcs_state =
L2BLK_GET_STATE((le)->le_prop);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, exists);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(exists), exists);
mutex_exit(&dev->l2ad_mtx);
l2arc_hdr_arcstats_increment(exists);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
}
ARCSTAT_BUMP(arcstat_l2_rebuild_bufs_precached);
}
mutex_exit(hash_lock);
}
/*
* Starts an asynchronous read IO to read a log block. This is used in log
* block reconstruction to start reading the next block before we are done
* decoding and reconstructing the current block, to keep the l2arc device
* nice and hot with read IO to process.
* The returned zio will contain a newly allocated memory buffer for the IO
* data which should then be freed by the caller once the zio is no longer
* needed (i.e. due to it having completed). If you wish to abort this
* zio, you should do so using l2arc_log_blk_fetch_abort, which takes
* care of disposing of the allocated buffers correctly.
*/
static zio_t *
l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp,
l2arc_log_blk_phys_t *lb)
{
uint32_t asize;
zio_t *pio;
l2arc_read_callback_t *cb;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
ASSERT(asize <= sizeof (l2arc_log_blk_phys_t));
cb = kmem_zalloc(sizeof (l2arc_read_callback_t), KM_SLEEP);
cb->l2rcb_abd = abd_get_from_buf(lb, asize);
pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY);
(void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize,
cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE));
return (pio);
}
/*
* Aborts a zio returned from l2arc_log_blk_fetch and frees the data
* buffers allocated for it.
*/
static void
l2arc_log_blk_fetch_abort(zio_t *zio)
{
(void) zio_wait(zio);
}
/*
* Creates a zio to update the device header on an l2arc device.
*/
void
l2arc_dev_hdr_update(l2arc_dev_t *dev)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
int err;
VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER));
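/*
* Refresh the in-core device header and write it out in full at
* offset VDEV_LABEL_START_SIZE.
*/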
l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC;
l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION;
l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa);
l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid;
l2dhdr->dh_log_entries = dev->l2ad_log_entries;
l2dhdr->dh_evict = dev->l2ad_evict;
l2dhdr->dh_start = dev->l2ad_start;
l2dhdr->dh_end = dev->l2ad_end;
l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize);
l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count);
l2dhdr->dh_flags = 0;
l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time;
l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state;
if (dev->l2ad_first)
l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST;
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd, ZIO_CHECKSUM_LABEL, NULL,
NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE));
abd_free(abd);
if (err != 0) {
zfs_dbgmsg("L2ARC IO error (%d) while writing device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
}
}
/*
* Commits a log block to the L2ARC device. This routine is invoked from
* l2arc_write_buffers when the log block fills up.
* This function allocates some memory to temporarily hold the serialized
* buffer to be written. This is then released in l2arc_write_done.
*/
static void
l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t psize, asize;
zio_t *wzio;
l2arc_lb_abd_buf_t *abd_buf;
uint8_t *tmpbuf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries);
tmpbuf = zio_buf_alloc(sizeof (*lb));
abd_buf = zio_buf_alloc(sizeof (*abd_buf));
abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb));
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP);
/* link the buffer into the block chain */
lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1];
lb->lb_magic = L2ARC_LOG_BLK_MAGIC;
/*
* l2arc_log_blk_commit() may be called multiple times during a single
* l2arc_write_buffers() call. Save the allocated abd buffers in a list
* so we can free them in l2arc_write_done() later on.
*/
list_insert_tail(&cb->l2wcb_abd_list, abd_buf);
/* try to compress the buffer */
psize = zio_compress_data(ZIO_COMPRESS_LZ4,
abd_buf->abd, tmpbuf, sizeof (*lb), 0);
/* a log block is never entirely zero */
ASSERT(psize != 0);
asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(asize <= sizeof (*lb));
/*
* Update the start log block pointer in the device header to point
* to the log block we're about to write.
*/
l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0];
l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand;
l2dhdr->dh_start_lbps[0].lbp_payload_asize =
dev->l2ad_log_blk_payload_asize;
l2dhdr->dh_start_lbps[0].lbp_payload_start =
dev->l2ad_log_blk_payload_start;
- _NOTE(CONSTCOND)
L2BLK_SET_LSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb));
L2BLK_SET_PSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, asize);
L2BLK_SET_CHECKSUM(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_CHECKSUM_FLETCHER_4);
if (asize < sizeof (*lb)) {
/* compression succeeded */
bzero(tmpbuf + psize, asize - psize);
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_LZ4);
} else {
/* compression failed */
bcopy(lb, tmpbuf, sizeof (*lb));
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_OFF);
}
/* checksum what we're about to write */
fletcher_4_native(tmpbuf, asize, NULL,
&l2dhdr->dh_start_lbps[0].lbp_cksum);
abd_free(abd_buf->abd);
/* perform the write itself */
abd_buf->abd = abd_get_from_buf(tmpbuf, sizeof (*lb));
abd_take_ownership_of_buf(abd_buf->abd, B_TRUE);
wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand,
asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
(void) zio_nowait(wzio);
dev->l2ad_hand += asize;
/*
* Include the committed log block's pointer in the list of pointers
* to log blocks present in the L2ARC device.
*/
bcopy(&l2dhdr->dh_start_lbps[0], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
/* bump the kstats */
ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_writes);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio,
dev->l2ad_log_blk_payload_asize / asize);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
}
/*
* Validates an L2ARC log block address to make sure that it can be read
* from the provided L2ARC device.
*/
boolean_t
l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp)
{
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
uint64_t end = lbp->lbp_daddr + asize - 1;
uint64_t start = lbp->lbp_payload_start;
boolean_t evicted = B_FALSE;
/*
* A log block is valid if all of the following conditions are true:
* - it fits entirely (including its payload) between l2ad_start and
* l2ad_end
* - it has a valid size
* - neither the log block itself nor part of its payload was evicted
* by l2arc_evict():
*
* l2ad_hand l2ad_evict
* | | lbp_daddr
* | start | | end
* | | | | |
* V V V V V
* l2ad_start ============================================ l2ad_end
* --------------------------||||
* ^ ^
* | log block
* payload
*/
evicted =
l2arc_range_check_overlap(start, end, dev->l2ad_hand) ||
l2arc_range_check_overlap(start, end, dev->l2ad_evict) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end);
return (start >= dev->l2ad_start && end <= dev->l2ad_end &&
asize > 0 && asize <= sizeof (l2arc_log_blk_phys_t) &&
(!evicted || dev->l2ad_first));
}
/*
* Inserts ARC buffer header `hdr' into the current L2ARC log block on
* the device. The buffer being inserted must be present in L2ARC.
* Returns B_TRUE if the L2ARC log block is full and needs to be committed
* to L2ARC, or B_FALSE if it still has room for more ARC buffers.
*/
static boolean_t
l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_log_ent_phys_t *le;
if (dev->l2ad_log_entries == 0)
return (B_FALSE);
int index = dev->l2ad_log_ent_idx++;
ASSERT3S(index, <, dev->l2ad_log_entries);
ASSERT(HDR_HAS_L2HDR(hdr));
le = &lb->lb_entries[index];
bzero(le, sizeof (*le));
le->le_dva = hdr->b_dva;
le->le_birth = hdr->b_birth;
le->le_daddr = hdr->b_l2hdr.b_daddr;
if (index == 0)
dev->l2ad_log_blk_payload_start = le->le_daddr;
L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr));
L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr));
L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr));
le->le_complevel = hdr->b_complevel;
L2BLK_SET_TYPE((le)->le_prop, hdr->b_type);
L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr)));
L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr)));
L2BLK_SET_STATE((le)->le_prop, hdr->b_l1hdr.b_state->arcs_state);
dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev,
HDR_GET_PSIZE(hdr));
return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries);
}
/*
* Checks whether a given L2ARC device address sits in a time-sequential
* range. The trick here is that the L2ARC is a rotary buffer, so we can't
* just do a range comparison; we need to handle the situation in which the
* range wraps around the end of the L2ARC device. Arguments:
* bottom -- Lower end of the range to check (written to earlier).
* top -- Upper end of the range to check (written to later).
* check -- The address for which we want to determine if it sits in
* between the top and bottom.
*
* The 3-way conditional below represents the following cases:
*
* bottom < top : Sequentially ordered case:
* <check>--------+-------------------+
* | (overlap here?) |
* L2ARC dev V V
* |---------------<bottom>============<top>--------------|
*
* bottom > top: Looped-around case:
* <check>--------+------------------+
* | (overlap here?) |
* L2ARC dev V V
* |===============<top>---------------<bottom>===========|
* ^ ^
* | (or here?) |
* +---------------+---------<check>
*
* top == bottom : Just a single address comparison.
*/
boolean_t
l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check)
{
if (bottom < top)
return (bottom <= check && check <= top);
else if (bottom > top)
return (check <= top || bottom <= check);
else
return (check == top);
}
EXPORT_SYMBOL(arc_buf_size);
EXPORT_SYMBOL(arc_write);
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_info);
EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_long,
param_get_long, ZMOD_RW, "Min arc size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_long,
param_get_long, ZMOD_RW, "Max arc size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit, param_set_arc_long,
param_get_long, ZMOD_RW, "Metadata limit for arc size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit_percent,
param_set_arc_long, param_get_long, ZMOD_RW,
"Percent of arc size for arc meta limit");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_min, param_set_arc_long,
param_get_long, ZMOD_RW, "Min arc metadata");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_prune, INT, ZMOD_RW,
"Meta objects to scan for prune");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_adjust_restarts, INT, ZMOD_RW,
"Limit number of restarts in arc_evict_meta");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_strategy, INT, ZMOD_RW,
"Meta reclaim strategy");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int,
param_get_int, ZMOD_RW, "Seconds before growing arc size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, p_dampener_disable, INT, ZMOD_RW,
"Disable arc_p adapt dampener");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int,
param_get_int, ZMOD_RW, "log2(fraction of arc to reclaim)");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW,
"Percent of pagecache to reclaim arc to");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, p_min_shift, param_set_arc_int,
param_get_int, ZMOD_RW, "arc_c shift to calc min/max arc_p");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, INT, ZMOD_RD,
"Target average block size");
ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW,
"Disable compressed arc buffers");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int,
param_get_int, ZMOD_RW, "Min life of prefetch block in ms");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms,
param_set_arc_int, param_get_int, ZMOD_RW,
"Min life of prescient prefetched block in ms");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, ULONG, ZMOD_RW,
"Max write bytes per interval");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_boost, ULONG, ZMOD_RW,
"Extra write bytes during device warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom, ULONG, ZMOD_RW,
"Number of max device writes to precache");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom_boost, ULONG, ZMOD_RW,
"Compressed l2arc_headroom multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, trim_ahead, ULONG, ZMOD_RW,
"TRIM ahead L2ARC write size multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_secs, ULONG, ZMOD_RW,
"Seconds between L2ARC writing");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_min_ms, ULONG, ZMOD_RW,
"Min feed interval in milliseconds");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, noprefetch, INT, ZMOD_RW,
"Skip caching prefetched buffers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW,
"Turbo L2ARC warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
"No reads during writes");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, INT, ZMOD_RW,
"Percent of ARC size allowed for L2ARC-only headers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
"Rebuild the L2ARC when importing a pool");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_blocks_min_l2size, ULONG, ZMOD_RW,
"Min size in bytes to write rebuild log blocks in L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, mfuonly, INT, ZMOD_RW,
"Cache only MFU data from ARC into L2ARC");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int,
param_get_int, ZMOD_RW, "System free memory I/O throttle in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_long,
param_get_long, ZMOD_RW, "System free memory target size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_long,
param_get_long, ZMOD_RW, "Minimum bytes of dnodes in arc");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent,
param_set_arc_long, param_get_long, ZMOD_RW,
"Percent of ARC meta buffers for dnodes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, ULONG, ZMOD_RW,
"Percentage of excess dnodes to try to unpin");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, INT, ZMOD_RW,
"When full, ARC allocation waits for eviction of this % of alloc size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, INT, ZMOD_RW,
"The number of headers to evict per sublist before moving to the next");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/ddt.c b/sys/contrib/openzfs/module/zfs/ddt.c
index 479e5a3ad625..fe5a188f4da1 100644
--- a/sys/contrib/openzfs/module/zfs/ddt.c
+++ b/sys/contrib/openzfs/module/zfs/ddt.c
@@ -1,1187 +1,1186 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/ddt.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/dsl_pool.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
static kmem_cache_t *ddt_cache;
static kmem_cache_t *ddt_entry_cache;
/*
* Enable/disable prefetching of dedup-ed blocks which are going to be freed.
*/
int zfs_dedup_prefetch = 0;
static const ddt_ops_t *ddt_ops[DDT_TYPES] = {
&ddt_zap_ops,
};
static const char *ddt_class_name[DDT_CLASSES] = {
"ditto",
"duplicate",
"unique",
};
static void
ddt_object_create(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
dmu_tx_t *tx)
{
spa_t *spa = ddt->ddt_spa;
objset_t *os = ddt->ddt_os;
uint64_t *objectp = &ddt->ddt_object[type][class];
boolean_t prehash = zio_checksum_table[ddt->ddt_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP;
char name[DDT_NAMELEN];
ddt_object_name(ddt, type, class, name);
ASSERT(*objectp == 0);
VERIFY(ddt_ops[type]->ddt_op_create(os, objectp, tx, prehash) == 0);
ASSERT(*objectp != 0);
VERIFY(zap_add(os, DMU_POOL_DIRECTORY_OBJECT, name,
sizeof (uint64_t), 1, objectp, tx) == 0);
VERIFY(zap_add(os, spa->spa_ddt_stat_object, name,
sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
&ddt->ddt_histogram[type][class], tx) == 0);
}
static void
ddt_object_destroy(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
dmu_tx_t *tx)
{
spa_t *spa = ddt->ddt_spa;
objset_t *os = ddt->ddt_os;
uint64_t *objectp = &ddt->ddt_object[type][class];
uint64_t count;
char name[DDT_NAMELEN];
ddt_object_name(ddt, type, class, name);
ASSERT(*objectp != 0);
ASSERT(ddt_histogram_empty(&ddt->ddt_histogram[type][class]));
VERIFY(ddt_object_count(ddt, type, class, &count) == 0 && count == 0);
VERIFY(zap_remove(os, DMU_POOL_DIRECTORY_OBJECT, name, tx) == 0);
VERIFY(zap_remove(os, spa->spa_ddt_stat_object, name, tx) == 0);
VERIFY(ddt_ops[type]->ddt_op_destroy(os, *objectp, tx) == 0);
bzero(&ddt->ddt_object_stats[type][class], sizeof (ddt_object_t));
*objectp = 0;
}
static int
ddt_object_load(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
ddt_object_t *ddo = &ddt->ddt_object_stats[type][class];
dmu_object_info_t doi;
uint64_t count;
char name[DDT_NAMELEN];
int error;
ddt_object_name(ddt, type, class, name);
error = zap_lookup(ddt->ddt_os, DMU_POOL_DIRECTORY_OBJECT, name,
sizeof (uint64_t), 1, &ddt->ddt_object[type][class]);
if (error != 0)
return (error);
error = zap_lookup(ddt->ddt_os, ddt->ddt_spa->spa_ddt_stat_object, name,
sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
&ddt->ddt_histogram[type][class]);
if (error != 0)
return (error);
/*
* Seed the cached statistics.
*/
error = ddt_object_info(ddt, type, class, &doi);
if (error)
return (error);
error = ddt_object_count(ddt, type, class, &count);
if (error)
return (error);
ddo->ddo_count = count;
ddo->ddo_dspace = doi.doi_physical_blocks_512 << 9;
ddo->ddo_mspace = doi.doi_fill_count * doi.doi_data_block_size;
return (0);
}
static void
ddt_object_sync(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
dmu_tx_t *tx)
{
ddt_object_t *ddo = &ddt->ddt_object_stats[type][class];
dmu_object_info_t doi;
uint64_t count;
char name[DDT_NAMELEN];
ddt_object_name(ddt, type, class, name);
VERIFY(zap_update(ddt->ddt_os, ddt->ddt_spa->spa_ddt_stat_object, name,
sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
&ddt->ddt_histogram[type][class], tx) == 0);
/*
* Cache DDT statistics; this is the only time they'll change.
*/
VERIFY(ddt_object_info(ddt, type, class, &doi) == 0);
VERIFY(ddt_object_count(ddt, type, class, &count) == 0);
ddo->ddo_count = count;
ddo->ddo_dspace = doi.doi_physical_blocks_512 << 9;
ddo->ddo_mspace = doi.doi_fill_count * doi.doi_data_block_size;
}
static int
ddt_object_lookup(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
ddt_entry_t *dde)
{
if (!ddt_object_exists(ddt, type, class))
return (SET_ERROR(ENOENT));
return (ddt_ops[type]->ddt_op_lookup(ddt->ddt_os,
ddt->ddt_object[type][class], dde));
}
static void
ddt_object_prefetch(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
ddt_entry_t *dde)
{
if (!ddt_object_exists(ddt, type, class))
return;
ddt_ops[type]->ddt_op_prefetch(ddt->ddt_os,
ddt->ddt_object[type][class], dde);
}
int
ddt_object_update(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
ddt_entry_t *dde, dmu_tx_t *tx)
{
ASSERT(ddt_object_exists(ddt, type, class));
return (ddt_ops[type]->ddt_op_update(ddt->ddt_os,
ddt->ddt_object[type][class], dde, tx));
}
static int
ddt_object_remove(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
ddt_entry_t *dde, dmu_tx_t *tx)
{
ASSERT(ddt_object_exists(ddt, type, class));
return (ddt_ops[type]->ddt_op_remove(ddt->ddt_os,
ddt->ddt_object[type][class], dde, tx));
}
int
ddt_object_walk(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
uint64_t *walk, ddt_entry_t *dde)
{
ASSERT(ddt_object_exists(ddt, type, class));
return (ddt_ops[type]->ddt_op_walk(ddt->ddt_os,
ddt->ddt_object[type][class], dde, walk));
}
int
ddt_object_count(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
uint64_t *count)
{
ASSERT(ddt_object_exists(ddt, type, class));
return (ddt_ops[type]->ddt_op_count(ddt->ddt_os,
ddt->ddt_object[type][class], count));
}
int
ddt_object_info(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
dmu_object_info_t *doi)
{
if (!ddt_object_exists(ddt, type, class))
return (SET_ERROR(ENOENT));
return (dmu_object_info(ddt->ddt_os, ddt->ddt_object[type][class],
doi));
}
boolean_t
ddt_object_exists(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
return (!!ddt->ddt_object[type][class]);
}
void
ddt_object_name(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
char *name)
{
(void) snprintf(name, DDT_NAMELEN, DMU_POOL_DDT,
zio_checksum_table[ddt->ddt_checksum].ci_name,
ddt_ops[type]->ddt_op_name, ddt_class_name[class]);
}
void
ddt_bp_fill(const ddt_phys_t *ddp, blkptr_t *bp, uint64_t txg)
{
ASSERT(txg != 0);
for (int d = 0; d < SPA_DVAS_PER_BP; d++)
bp->blk_dva[d] = ddp->ddp_dva[d];
BP_SET_BIRTH(bp, txg, ddp->ddp_phys_birth);
}
/*
* The bp created via this function may be used for repairs and scrub, but it
* will be missing the salt / IV required to do a full decrypting read.
*/
void
ddt_bp_create(enum zio_checksum checksum,
const ddt_key_t *ddk, const ddt_phys_t *ddp, blkptr_t *bp)
{
BP_ZERO(bp);
if (ddp != NULL)
ddt_bp_fill(ddp, bp, ddp->ddp_phys_birth);
bp->blk_cksum = ddk->ddk_cksum;
BP_SET_LSIZE(bp, DDK_GET_LSIZE(ddk));
BP_SET_PSIZE(bp, DDK_GET_PSIZE(ddk));
BP_SET_COMPRESS(bp, DDK_GET_COMPRESS(ddk));
BP_SET_CRYPT(bp, DDK_GET_CRYPT(ddk));
BP_SET_FILL(bp, 1);
BP_SET_CHECKSUM(bp, checksum);
BP_SET_TYPE(bp, DMU_OT_DEDUP);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 1);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
}
void
ddt_key_fill(ddt_key_t *ddk, const blkptr_t *bp)
{
ddk->ddk_cksum = bp->blk_cksum;
ddk->ddk_prop = 0;
ASSERT(BP_IS_ENCRYPTED(bp) || !BP_USES_CRYPT(bp));
DDK_SET_LSIZE(ddk, BP_GET_LSIZE(bp));
DDK_SET_PSIZE(ddk, BP_GET_PSIZE(bp));
DDK_SET_COMPRESS(ddk, BP_GET_COMPRESS(bp));
DDK_SET_CRYPT(ddk, BP_USES_CRYPT(bp));
}
void
ddt_phys_fill(ddt_phys_t *ddp, const blkptr_t *bp)
{
ASSERT(ddp->ddp_phys_birth == 0);
for (int d = 0; d < SPA_DVAS_PER_BP; d++)
ddp->ddp_dva[d] = bp->blk_dva[d];
ddp->ddp_phys_birth = BP_PHYSICAL_BIRTH(bp);
}
void
ddt_phys_clear(ddt_phys_t *ddp)
{
bzero(ddp, sizeof (*ddp));
}
void
ddt_phys_addref(ddt_phys_t *ddp)
{
ddp->ddp_refcnt++;
}
void
ddt_phys_decref(ddt_phys_t *ddp)
{
if (ddp) {
ASSERT(ddp->ddp_refcnt > 0);
ddp->ddp_refcnt--;
}
}
void
ddt_phys_free(ddt_t *ddt, ddt_key_t *ddk, ddt_phys_t *ddp, uint64_t txg)
{
blkptr_t blk;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
/*
* We clear the dedup bit so that zio_free() will actually free the
* space, rather than just decrementing the refcount in the DDT.
*/
BP_SET_DEDUP(&blk, 0);
ddt_phys_clear(ddp);
zio_free(ddt->ddt_spa, txg, &blk);
}
ddt_phys_t *
ddt_phys_select(const ddt_entry_t *dde, const blkptr_t *bp)
{
ddt_phys_t *ddp = (ddt_phys_t *)dde->dde_phys;
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (DVA_EQUAL(BP_IDENTITY(bp), &ddp->ddp_dva[0]) &&
BP_PHYSICAL_BIRTH(bp) == ddp->ddp_phys_birth)
return (ddp);
}
return (NULL);
}
uint64_t
ddt_phys_total_refcnt(const ddt_entry_t *dde)
{
uint64_t refcnt = 0;
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++)
refcnt += dde->dde_phys[p].ddp_refcnt;
return (refcnt);
}
static void
ddt_stat_generate(ddt_t *ddt, ddt_entry_t *dde, ddt_stat_t *dds)
{
spa_t *spa = ddt->ddt_spa;
ddt_phys_t *ddp = dde->dde_phys;
ddt_key_t *ddk = &dde->dde_key;
uint64_t lsize = DDK_GET_LSIZE(ddk);
uint64_t psize = DDK_GET_PSIZE(ddk);
bzero(dds, sizeof (*dds));
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
uint64_t dsize = 0;
uint64_t refcnt = ddp->ddp_refcnt;
if (ddp->ddp_phys_birth == 0)
continue;
for (int d = 0; d < DDE_GET_NDVAS(dde); d++)
dsize += dva_get_dsize_sync(spa, &ddp->ddp_dva[d]);
dds->dds_blocks += 1;
dds->dds_lsize += lsize;
dds->dds_psize += psize;
dds->dds_dsize += dsize;
dds->dds_ref_blocks += refcnt;
dds->dds_ref_lsize += lsize * refcnt;
dds->dds_ref_psize += psize * refcnt;
dds->dds_ref_dsize += dsize * refcnt;
}
}
void
ddt_stat_add(ddt_stat_t *dst, const ddt_stat_t *src, uint64_t neg)
{
const uint64_t *s = (const uint64_t *)src;
uint64_t *d = (uint64_t *)dst;
uint64_t *d_end = (uint64_t *)(dst + 1);
ASSERT(neg == 0 || neg == -1ULL); /* add or subtract */
for (int i = 0; i < d_end - d; i++)
d[i] += (s[i] ^ neg) - neg;
}
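/*
 * Note on the update above: with neg == 0, (s[i] ^ 0) - 0 == s[i], so the
 * loop adds src into dst field by field.  With neg == -1ULL, the identity
 * (s[i] ^ ~0ULL) - ~0ULL == ~s[i] + 1 == -s[i] (mod 2^64) turns the very
 * same loop into a subtraction.  E.g. for s[i] == 5 and neg == -1ULL the
 * term evaluates to 0xffff...fffb, i.e. dst loses 5.
 */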
static void
ddt_stat_update(ddt_t *ddt, ddt_entry_t *dde, uint64_t neg)
{
ddt_stat_t dds;
ddt_histogram_t *ddh;
int bucket;
ddt_stat_generate(ddt, dde, &dds);
bucket = highbit64(dds.dds_ref_blocks) - 1;
ASSERT(bucket >= 0);
ddh = &ddt->ddt_histogram[dde->dde_type][dde->dde_class];
ddt_stat_add(&ddh->ddh_stat[bucket], &dds, neg);
}
void
ddt_histogram_add(ddt_histogram_t *dst, const ddt_histogram_t *src)
{
for (int h = 0; h < 64; h++)
ddt_stat_add(&dst->ddh_stat[h], &src->ddh_stat[h], 0);
}
void
ddt_histogram_stat(ddt_stat_t *dds, const ddt_histogram_t *ddh)
{
bzero(dds, sizeof (*dds));
for (int h = 0; h < 64; h++)
ddt_stat_add(dds, &ddh->ddh_stat[h], 0);
}
boolean_t
ddt_histogram_empty(const ddt_histogram_t *ddh)
{
const uint64_t *s = (const uint64_t *)ddh;
const uint64_t *s_end = (const uint64_t *)(ddh + 1);
while (s < s_end)
if (*s++ != 0)
return (B_FALSE);
return (B_TRUE);
}
void
ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
{
/* Sum the statistics we cached in ddt_object_sync(). */
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
ddt_object_t *ddo =
&ddt->ddt_object_stats[type][class];
ddo_total->ddo_count += ddo->ddo_count;
ddo_total->ddo_dspace += ddo->ddo_dspace;
ddo_total->ddo_mspace += ddo->ddo_mspace;
}
}
}
/* ... and compute the averages. */
if (ddo_total->ddo_count != 0) {
ddo_total->ddo_dspace /= ddo_total->ddo_count;
ddo_total->ddo_mspace /= ddo_total->ddo_count;
}
}
void
ddt_get_dedup_histogram(spa_t *spa, ddt_histogram_t *ddh)
{
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES && ddt; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
ddt_histogram_add(ddh,
&ddt->ddt_histogram_cache[type][class]);
}
}
}
}
void
ddt_get_dedup_stats(spa_t *spa, ddt_stat_t *dds_total)
{
ddt_histogram_t *ddh_total;
ddh_total = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
ddt_get_dedup_histogram(spa, ddh_total);
ddt_histogram_stat(dds_total, ddh_total);
kmem_free(ddh_total, sizeof (ddt_histogram_t));
}
uint64_t
ddt_get_dedup_dspace(spa_t *spa)
{
ddt_stat_t dds_total;
if (spa->spa_dedup_dspace != ~0ULL)
return (spa->spa_dedup_dspace);
bzero(&dds_total, sizeof (ddt_stat_t));
/* Calculate and cache the stats */
ddt_get_dedup_stats(spa, &dds_total);
spa->spa_dedup_dspace = dds_total.dds_ref_dsize - dds_total.dds_dsize;
return (spa->spa_dedup_dspace);
}
uint64_t
ddt_get_pool_dedup_ratio(spa_t *spa)
{
ddt_stat_t dds_total = { 0 };
ddt_get_dedup_stats(spa, &dds_total);
if (dds_total.dds_dsize == 0)
return (100);
return (dds_total.dds_ref_dsize * 100 / dds_total.dds_dsize);
}
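/*
 * Worked example for the two functions above: if the tables reference
 * 300 GiB of logical allocations (dds_ref_dsize) that dedup stores in
 * 100 GiB on disk (dds_dsize), ddt_get_dedup_dspace() caches
 * 300 - 100 = 200 GiB in spa_dedup_dspace (space that would otherwise be
 * consumed), and ddt_get_pool_dedup_ratio() returns 300 * 100 / 100 = 300,
 * which property consumers typically render as a 3.00x dedup ratio.
 */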
size_t
ddt_compress(void *src, uchar_t *dst, size_t s_len, size_t d_len)
{
uchar_t *version = dst++;
int cpfunc = ZIO_COMPRESS_ZLE;
zio_compress_info_t *ci = &zio_compress_table[cpfunc];
size_t c_len;
ASSERT(d_len >= s_len + 1); /* no compression plus version byte */
c_len = ci->ci_compress(src, dst, s_len, d_len - 1, ci->ci_level);
if (c_len == s_len) {
cpfunc = ZIO_COMPRESS_OFF;
bcopy(src, dst, s_len);
}
*version = cpfunc;
- /* CONSTCOND */
if (ZFS_HOST_BYTEORDER)
*version |= DDT_COMPRESS_BYTEORDER_MASK;
return (c_len + 1);
}
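/*
 * Layout produced above: byte 0 is a version byte whose low bits name the
 * compression function (ZIO_COMPRESS_ZLE, or ZIO_COMPRESS_OFF when ZLE
 * could not shrink the buffer and the data was copied raw) and whose
 * DDT_COMPRESS_BYTEORDER_MASK bit records the writer's byte order; the
 * compressed (or raw) payload follows, hence the c_len + 1 return value.
 * ddt_decompress() below masks the function back out and byteswaps when
 * the stored byte-order bit differs from ZFS_HOST_BYTEORDER.
 */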
void
ddt_decompress(uchar_t *src, void *dst, size_t s_len, size_t d_len)
{
uchar_t version = *src++;
int cpfunc = version & DDT_COMPRESS_FUNCTION_MASK;
zio_compress_info_t *ci = &zio_compress_table[cpfunc];
if (ci->ci_decompress != NULL)
(void) ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level);
else
bcopy(src, dst, d_len);
if (((version & DDT_COMPRESS_BYTEORDER_MASK) != 0) !=
(ZFS_HOST_BYTEORDER != 0))
byteswap_uint64_array(dst, d_len);
}
ddt_t *
ddt_select(spa_t *spa, const blkptr_t *bp)
{
return (spa->spa_ddt[BP_GET_CHECKSUM(bp)]);
}
void
ddt_enter(ddt_t *ddt)
{
mutex_enter(&ddt->ddt_lock);
}
void
ddt_exit(ddt_t *ddt)
{
mutex_exit(&ddt->ddt_lock);
}
void
ddt_init(void)
{
ddt_cache = kmem_cache_create("ddt_cache",
sizeof (ddt_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
ddt_entry_cache = kmem_cache_create("ddt_entry_cache",
sizeof (ddt_entry_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void
ddt_fini(void)
{
kmem_cache_destroy(ddt_entry_cache);
kmem_cache_destroy(ddt_cache);
}
static ddt_entry_t *
ddt_alloc(const ddt_key_t *ddk)
{
ddt_entry_t *dde;
dde = kmem_cache_alloc(ddt_entry_cache, KM_SLEEP);
bzero(dde, sizeof (ddt_entry_t));
cv_init(&dde->dde_cv, NULL, CV_DEFAULT, NULL);
dde->dde_key = *ddk;
return (dde);
}
static void
ddt_free(ddt_entry_t *dde)
{
ASSERT(!dde->dde_loading);
for (int p = 0; p < DDT_PHYS_TYPES; p++)
ASSERT(dde->dde_lead_zio[p] == NULL);
if (dde->dde_repair_abd != NULL)
abd_free(dde->dde_repair_abd);
cv_destroy(&dde->dde_cv);
kmem_cache_free(ddt_entry_cache, dde);
}
void
ddt_remove(ddt_t *ddt, ddt_entry_t *dde)
{
ASSERT(MUTEX_HELD(&ddt->ddt_lock));
avl_remove(&ddt->ddt_tree, dde);
ddt_free(dde);
}
ddt_entry_t *
ddt_lookup(ddt_t *ddt, const blkptr_t *bp, boolean_t add)
{
ddt_entry_t *dde, dde_search;
enum ddt_type type;
enum ddt_class class;
avl_index_t where;
int error;
ASSERT(MUTEX_HELD(&ddt->ddt_lock));
ddt_key_fill(&dde_search.dde_key, bp);
dde = avl_find(&ddt->ddt_tree, &dde_search, &where);
if (dde == NULL) {
if (!add)
return (NULL);
dde = ddt_alloc(&dde_search.dde_key);
avl_insert(&ddt->ddt_tree, dde, where);
}
while (dde->dde_loading)
cv_wait(&dde->dde_cv, &ddt->ddt_lock);
if (dde->dde_loaded)
return (dde);
dde->dde_loading = B_TRUE;
ddt_exit(ddt);
error = ENOENT;
for (type = 0; type < DDT_TYPES; type++) {
for (class = 0; class < DDT_CLASSES; class++) {
error = ddt_object_lookup(ddt, type, class, dde);
if (error != ENOENT) {
ASSERT0(error);
break;
}
}
if (error != ENOENT)
break;
}
ddt_enter(ddt);
ASSERT(dde->dde_loaded == B_FALSE);
ASSERT(dde->dde_loading == B_TRUE);
dde->dde_type = type; /* will be DDT_TYPES if no entry found */
dde->dde_class = class; /* will be DDT_CLASSES if no entry found */
dde->dde_loaded = B_TRUE;
dde->dde_loading = B_FALSE;
if (error == 0)
ddt_stat_update(ddt, dde, -1ULL);
cv_broadcast(&dde->dde_cv);
return (dde);
}
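/*
 * Lookup protocol used above: the first thread to miss in the AVL tree
 * inserts a stub entry, marks it dde_loading, and drops ddt_lock while it
 * probes each (type, class) object on disk; concurrent callers for the
 * same key wait on dde_cv instead of issuing duplicate I/O.  dde_type and
 * dde_class end up at DDT_TYPES/DDT_CLASSES when no on-disk entry exists.
 * When one is found, its stats are subtracted from the histogram
 * (neg == -1ULL) so that ddt_sync_entry() can re-add the updated entry's
 * contribution when it is written back.
 */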
void
ddt_prefetch(spa_t *spa, const blkptr_t *bp)
{
ddt_t *ddt;
ddt_entry_t dde;
if (!zfs_dedup_prefetch || bp == NULL || !BP_GET_DEDUP(bp))
return;
/*
* We only remove the DDT once all tables are empty and only
* prefetch dedup blocks when there are entries in the DDT.
* Thus no locking is required as the DDT can't disappear on us.
*/
ddt = ddt_select(spa, bp);
ddt_key_fill(&dde.dde_key, bp);
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
ddt_object_prefetch(ddt, type, class, &dde);
}
}
}
/*
* Opaque struct used for ddt_key comparison
*/
#define DDT_KEY_CMP_LEN (sizeof (ddt_key_t) / sizeof (uint16_t))
typedef struct ddt_key_cmp {
uint16_t u16[DDT_KEY_CMP_LEN];
} ddt_key_cmp_t;
int
ddt_entry_compare(const void *x1, const void *x2)
{
const ddt_entry_t *dde1 = x1;
const ddt_entry_t *dde2 = x2;
const ddt_key_cmp_t *k1 = (const ddt_key_cmp_t *)&dde1->dde_key;
const ddt_key_cmp_t *k2 = (const ddt_key_cmp_t *)&dde2->dde_key;
int32_t cmp = 0;
for (int i = 0; i < DDT_KEY_CMP_LEN; i++) {
cmp = (int32_t)k1->u16[i] - (int32_t)k2->u16[i];
if (likely(cmp))
break;
}
return (TREE_ISIGN(cmp));
}
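/*
 * The comparator above views ddt_key_t as an array of uint16_t words and
 * orders entries by the first word that differs; TREE_ISIGN() collapses
 * the signed difference to -1, 0, or +1 as the AVL code expects.  The
 * particular order is immaterial, it only needs to be total and stable.
 */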
static ddt_t *
ddt_table_alloc(spa_t *spa, enum zio_checksum c)
{
ddt_t *ddt;
ddt = kmem_cache_alloc(ddt_cache, KM_SLEEP);
bzero(ddt, sizeof (ddt_t));
mutex_init(&ddt->ddt_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&ddt->ddt_tree, ddt_entry_compare,
sizeof (ddt_entry_t), offsetof(ddt_entry_t, dde_node));
avl_create(&ddt->ddt_repair_tree, ddt_entry_compare,
sizeof (ddt_entry_t), offsetof(ddt_entry_t, dde_node));
ddt->ddt_checksum = c;
ddt->ddt_spa = spa;
ddt->ddt_os = spa->spa_meta_objset;
return (ddt);
}
static void
ddt_table_free(ddt_t *ddt)
{
ASSERT(avl_numnodes(&ddt->ddt_tree) == 0);
ASSERT(avl_numnodes(&ddt->ddt_repair_tree) == 0);
avl_destroy(&ddt->ddt_tree);
avl_destroy(&ddt->ddt_repair_tree);
mutex_destroy(&ddt->ddt_lock);
kmem_cache_free(ddt_cache, ddt);
}
void
ddt_create(spa_t *spa)
{
spa->spa_dedup_checksum = ZIO_DEDUPCHECKSUM;
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
spa->spa_ddt[c] = ddt_table_alloc(spa, c);
}
int
ddt_load(spa_t *spa)
{
int error;
ddt_create(spa);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DDT_STATS, sizeof (uint64_t), 1,
&spa->spa_ddt_stat_object);
if (error)
return (error == ENOENT ? 0 : error);
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
error = ddt_object_load(ddt, type, class);
if (error != 0 && error != ENOENT)
return (error);
}
}
/*
* Seed the cached histograms.
*/
bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
sizeof (ddt->ddt_histogram));
spa->spa_dedup_dspace = ~0ULL;
}
return (0);
}
void
ddt_unload(spa_t *spa)
{
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
if (spa->spa_ddt[c]) {
ddt_table_free(spa->spa_ddt[c]);
spa->spa_ddt[c] = NULL;
}
}
}
boolean_t
ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
{
ddt_t *ddt;
ddt_entry_t *dde;
if (!BP_GET_DEDUP(bp))
return (B_FALSE);
if (max_class == DDT_CLASS_UNIQUE)
return (B_TRUE);
ddt = spa->spa_ddt[BP_GET_CHECKSUM(bp)];
dde = kmem_cache_alloc(ddt_entry_cache, KM_SLEEP);
ddt_key_fill(&(dde->dde_key), bp);
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class <= max_class; class++) {
if (ddt_object_lookup(ddt, type, class, dde) == 0) {
kmem_cache_free(ddt_entry_cache, dde);
return (B_TRUE);
}
}
}
kmem_cache_free(ddt_entry_cache, dde);
return (B_FALSE);
}
ddt_entry_t *
ddt_repair_start(ddt_t *ddt, const blkptr_t *bp)
{
ddt_key_t ddk;
ddt_entry_t *dde;
ddt_key_fill(&ddk, bp);
dde = ddt_alloc(&ddk);
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
/*
* We can only do repair if there are multiple copies
* of the block. For anything in the UNIQUE class,
* there's definitely only one copy, so don't even try.
*/
if (class != DDT_CLASS_UNIQUE &&
ddt_object_lookup(ddt, type, class, dde) == 0)
return (dde);
}
}
bzero(dde->dde_phys, sizeof (dde->dde_phys));
return (dde);
}
void
ddt_repair_done(ddt_t *ddt, ddt_entry_t *dde)
{
avl_index_t where;
ddt_enter(ddt);
if (dde->dde_repair_abd != NULL && spa_writeable(ddt->ddt_spa) &&
avl_find(&ddt->ddt_repair_tree, dde, &where) == NULL)
avl_insert(&ddt->ddt_repair_tree, dde, where);
else
ddt_free(dde);
ddt_exit(ddt);
}
static void
ddt_repair_entry_done(zio_t *zio)
{
ddt_entry_t *rdde = zio->io_private;
ddt_free(rdde);
}
static void
ddt_repair_entry(ddt_t *ddt, ddt_entry_t *dde, ddt_entry_t *rdde, zio_t *rio)
{
ddt_phys_t *ddp = dde->dde_phys;
ddt_phys_t *rddp = rdde->dde_phys;
ddt_key_t *ddk = &dde->dde_key;
ddt_key_t *rddk = &rdde->dde_key;
zio_t *zio;
blkptr_t blk;
zio = zio_null(rio, rio->io_spa, NULL,
ddt_repair_entry_done, rdde, rio->io_flags);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++, rddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth != rddp->ddp_phys_birth ||
bcmp(ddp->ddp_dva, rddp->ddp_dva, sizeof (ddp->ddp_dva)))
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
zio_nowait(zio_rewrite(zio, zio->io_spa, 0, &blk,
rdde->dde_repair_abd, DDK_GET_PSIZE(rddk), NULL, NULL,
ZIO_PRIORITY_SYNC_WRITE, ZIO_DDT_CHILD_FLAGS(zio), NULL));
}
zio_nowait(zio);
}
static void
ddt_repair_table(ddt_t *ddt, zio_t *rio)
{
spa_t *spa = ddt->ddt_spa;
ddt_entry_t *dde, *rdde_next, *rdde;
avl_tree_t *t = &ddt->ddt_repair_tree;
blkptr_t blk;
if (spa_sync_pass(spa) > 1)
return;
ddt_enter(ddt);
for (rdde = avl_first(t); rdde != NULL; rdde = rdde_next) {
rdde_next = AVL_NEXT(t, rdde);
avl_remove(&ddt->ddt_repair_tree, rdde);
ddt_exit(ddt);
ddt_bp_create(ddt->ddt_checksum, &rdde->dde_key, NULL, &blk);
dde = ddt_repair_start(ddt, &blk);
ddt_repair_entry(ddt, dde, rdde, rio);
ddt_repair_done(ddt, dde);
ddt_enter(ddt);
}
ddt_exit(ddt);
}
static void
ddt_sync_entry(ddt_t *ddt, ddt_entry_t *dde, dmu_tx_t *tx, uint64_t txg)
{
dsl_pool_t *dp = ddt->ddt_spa->spa_dsl_pool;
ddt_phys_t *ddp = dde->dde_phys;
ddt_key_t *ddk = &dde->dde_key;
enum ddt_type otype = dde->dde_type;
enum ddt_type ntype = DDT_TYPE_CURRENT;
enum ddt_class oclass = dde->dde_class;
enum ddt_class nclass;
uint64_t total_refcnt = 0;
ASSERT(dde->dde_loaded);
ASSERT(!dde->dde_loading);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
ASSERT(dde->dde_lead_zio[p] == NULL);
if (ddp->ddp_phys_birth == 0) {
ASSERT(ddp->ddp_refcnt == 0);
continue;
}
if (p == DDT_PHYS_DITTO) {
/*
* Note, we no longer create DDT-DITTO blocks, but we
* don't want to leak any written by older software.
*/
ddt_phys_free(ddt, ddk, ddp, txg);
continue;
}
if (ddp->ddp_refcnt == 0)
ddt_phys_free(ddt, ddk, ddp, txg);
total_refcnt += ddp->ddp_refcnt;
}
/* We do not create new DDT-DITTO blocks. */
ASSERT0(dde->dde_phys[DDT_PHYS_DITTO].ddp_phys_birth);
if (total_refcnt > 1)
nclass = DDT_CLASS_DUPLICATE;
else
nclass = DDT_CLASS_UNIQUE;
if (otype != DDT_TYPES &&
(otype != ntype || oclass != nclass || total_refcnt == 0)) {
VERIFY(ddt_object_remove(ddt, otype, oclass, dde, tx) == 0);
ASSERT(ddt_object_lookup(ddt, otype, oclass, dde) == ENOENT);
}
if (total_refcnt != 0) {
dde->dde_type = ntype;
dde->dde_class = nclass;
ddt_stat_update(ddt, dde, 0);
if (!ddt_object_exists(ddt, ntype, nclass))
ddt_object_create(ddt, ntype, nclass, tx);
VERIFY(ddt_object_update(ddt, ntype, nclass, dde, tx) == 0);
/*
* If the class changes, the order that we scan this bp
* changes. If it decreases, we could miss it, so
* scan it right now. (This covers both class changing
* while we are doing ddt_walk(), and when we are
* traversing.)
*/
if (nclass < oclass) {
dsl_scan_ddt_entry(dp->dp_scan,
ddt->ddt_checksum, dde, tx);
}
}
}
static void
ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
{
spa_t *spa = ddt->ddt_spa;
ddt_entry_t *dde;
void *cookie = NULL;
if (avl_numnodes(&ddt->ddt_tree) == 0)
return;
ASSERT(spa->spa_uberblock.ub_version >= SPA_VERSION_DEDUP);
if (spa->spa_ddt_stat_object == 0) {
spa->spa_ddt_stat_object = zap_create_link(ddt->ddt_os,
DMU_OT_DDT_STATS, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DDT_STATS, tx);
}
while ((dde = avl_destroy_nodes(&ddt->ddt_tree, &cookie)) != NULL) {
ddt_sync_entry(ddt, dde, tx, txg);
ddt_free(dde);
}
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
uint64_t add, count = 0;
for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
if (ddt_object_exists(ddt, type, class)) {
ddt_object_sync(ddt, type, class, tx);
VERIFY(ddt_object_count(ddt, type, class,
&add) == 0);
count += add;
}
}
for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
if (count == 0 && ddt_object_exists(ddt, type, class))
ddt_object_destroy(ddt, type, class, tx);
}
}
bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
sizeof (ddt->ddt_histogram));
spa->spa_dedup_dspace = ~0ULL;
}
void
ddt_sync(spa_t *spa, uint64_t txg)
{
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dmu_tx_t *tx;
zio_t *rio;
ASSERT(spa_syncing_txg(spa) == txg);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
rio = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SELF_HEAL);
/*
* This function may cause an immediate scan of ddt blocks (see
* the comment above dsl_scan_ddt() for details). We set the
* scan's root zio here so that we can wait for any scan IOs in
* addition to the regular ddt IOs.
*/
ASSERT3P(scn->scn_zio_root, ==, NULL);
scn->scn_zio_root = rio;
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
if (ddt == NULL)
continue;
ddt_sync_table(ddt, tx, txg);
ddt_repair_table(ddt, rio);
}
(void) zio_wait(rio);
scn->scn_zio_root = NULL;
dmu_tx_commit(tx);
}
int
ddt_walk(spa_t *spa, ddt_bookmark_t *ddb, ddt_entry_t *dde)
{
do {
do {
do {
ddt_t *ddt = spa->spa_ddt[ddb->ddb_checksum];
int error = ENOENT;
if (ddt_object_exists(ddt, ddb->ddb_type,
ddb->ddb_class)) {
error = ddt_object_walk(ddt,
ddb->ddb_type, ddb->ddb_class,
&ddb->ddb_cursor, dde);
}
dde->dde_type = ddb->ddb_type;
dde->dde_class = ddb->ddb_class;
if (error == 0)
return (0);
if (error != ENOENT)
return (error);
ddb->ddb_cursor = 0;
} while (++ddb->ddb_checksum < ZIO_CHECKSUM_FUNCTIONS);
ddb->ddb_checksum = 0;
} while (++ddb->ddb_type < DDT_TYPES);
ddb->ddb_type = 0;
} while (++ddb->ddb_class < DDT_CLASSES);
return (SET_ERROR(ENOENT));
}
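/*
 * ddt_walk() treats the bookmark as an odometer: ddb_cursor advances
 * through one (checksum, type, class) object, then the checksum wheel
 * turns, then the type wheel, and finally the class wheel.  ENOENT is
 * returned only after every combination is exhausted, so a scan can be
 * resumed from a saved ddt_bookmark_t.
 */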
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_dedup, zfs_dedup_, prefetch, INT, ZMOD_RW,
"Enable prefetching dedup-ed blks");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dnode.c b/sys/contrib/openzfs/module/zfs/dnode.c
index b1813a8951d5..7f741542ce02 100644
--- a/sys/contrib/openzfs/module/zfs/dnode.c
+++ b/sys/contrib/openzfs/module/zfs/dnode.c
@@ -1,2579 +1,2580 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_project.h>
dnode_stats_t dnode_stats = {
{ "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 },
{ "dnode_hold_dbuf_read", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_interior", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_type_none", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_overflow", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_refcount", KSTAT_DATA_UINT64 },
{ "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_allocate", KSTAT_DATA_UINT64 },
{ "dnode_reallocate", KSTAT_DATA_UINT64 },
{ "dnode_buf_evict", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_chunk", KSTAT_DATA_UINT64 },
{ "dnode_alloc_race", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_block", KSTAT_DATA_UINT64 },
{ "dnode_move_invalid", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck1", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck2", KSTAT_DATA_UINT64 },
{ "dnode_move_special", KSTAT_DATA_UINT64 },
{ "dnode_move_handle", KSTAT_DATA_UINT64 },
{ "dnode_move_rwlock", KSTAT_DATA_UINT64 },
{ "dnode_move_active", KSTAT_DATA_UINT64 },
};
static kstat_t *dnode_ksp;
static kmem_cache_t *dnode_cache;
static dnode_phys_t dnode_phys_zero __maybe_unused;
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
#ifdef _KERNEL
static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
#endif /* _KERNEL */
static int
dbuf_compare(const void *x1, const void *x2)
{
const dmu_buf_impl_t *d1 = x1;
const dmu_buf_impl_t *d2 = x2;
int cmp = TREE_CMP(d1->db_level, d2->db_level);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(d1->db_blkid, d2->db_blkid);
if (likely(cmp))
return (cmp);
if (d1->db_state == DB_SEARCH) {
ASSERT3S(d2->db_state, !=, DB_SEARCH);
return (-1);
} else if (d2->db_state == DB_SEARCH) {
ASSERT3S(d1->db_state, !=, DB_SEARCH);
return (1);
}
return (TREE_PCMP(d1, d2));
}
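/*
 * dbuf_compare() orders a dnode's dbufs by (db_level, db_blkid).  A
 * DB_SEARCH entry is a synthetic probe rather than a real dbuf: it
 * compares as strictly less than any real dbuf with the same level and
 * blkid, so an avl_find() with such a probe positions the caller just
 * before the first matching dbuf instead of returning an arbitrary equal
 * element.
 */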
/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
dnode_t *dn = arg;
int i;
rw_init(&dn->dn_struct_rwlock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
cv_init(&dn->dn_nodnholds, NULL, CV_DEFAULT, NULL);
/*
* Every dbuf has a reference, and dropping a tracked reference is
* O(number of references), so don't track dn_holds.
*/
zfs_refcount_create_untracked(&dn->dn_holds);
zfs_refcount_create(&dn->dn_tx_holds);
list_link_init(&dn->dn_link);
+ bzero(&dn->dn_next_type[0], sizeof (dn->dn_next_type));
bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));
bzero(&dn->dn_next_maxblkid[0], sizeof (dn->dn_next_maxblkid));
for (i = 0; i < TXG_SIZE; i++) {
multilist_link_init(&dn->dn_dirty_link[i]);
dn->dn_free_ranges[i] = NULL;
list_create(&dn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_bonus = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_zio = NULL;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dn->dn_dbufs_count = 0;
avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
dn->dn_moved = 0;
return (0);
}
/* ARGSUSED */
static void
dnode_dest(void *arg, void *unused)
{
int i;
dnode_t *dn = arg;
rw_destroy(&dn->dn_struct_rwlock);
mutex_destroy(&dn->dn_mtx);
mutex_destroy(&dn->dn_dbufs_mtx);
cv_destroy(&dn->dn_notxholds);
cv_destroy(&dn->dn_nodnholds);
zfs_refcount_destroy(&dn->dn_holds);
zfs_refcount_destroy(&dn->dn_tx_holds);
ASSERT(!list_link_active(&dn->dn_link));
for (i = 0; i < TXG_SIZE; i++) {
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
list_destroy(&dn->dn_dirty_records[i]);
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
}
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_free_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT0(dn->dn_dirty_txg);
ASSERT0(dn->dn_dirtyctx);
ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
ASSERT3P(dn->dn_bonus, ==, NULL);
ASSERT(!dn->dn_have_spill);
ASSERT3P(dn->dn_zio, ==, NULL);
ASSERT0(dn->dn_oldused);
ASSERT0(dn->dn_oldflags);
ASSERT0(dn->dn_olduid);
ASSERT0(dn->dn_oldgid);
ASSERT0(dn->dn_oldprojid);
ASSERT0(dn->dn_newuid);
ASSERT0(dn->dn_newgid);
ASSERT0(dn->dn_newprojid);
ASSERT0(dn->dn_id_flags);
ASSERT0(dn->dn_dbufs_count);
avl_destroy(&dn->dn_dbufs);
}
void
dnode_init(void)
{
ASSERT(dnode_cache == NULL);
dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
kmem_cache_set_move(dnode_cache, dnode_move);
dnode_ksp = kstat_create("zfs", 0, "dnodestats", "misc",
KSTAT_TYPE_NAMED, sizeof (dnode_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dnode_ksp != NULL) {
dnode_ksp->ks_data = &dnode_stats;
kstat_install(dnode_ksp);
}
}
void
dnode_fini(void)
{
if (dnode_ksp != NULL) {
kstat_delete(dnode_ksp);
dnode_ksp = NULL;
}
kmem_cache_destroy(dnode_cache);
dnode_cache = NULL;
}
#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
int drop_struct_lock = FALSE;
ASSERT(dn->dn_phys);
ASSERT(dn->dn_objset);
ASSERT(dn->dn_handle->dnh_dnode == dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
return;
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
int i;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
if (dn->dn_datablkshift) {
ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
}
ASSERT3U(dn->dn_nlevels, <=, 30);
ASSERT(DMU_OT_IS_VALID(dn->dn_type));
ASSERT3U(dn->dn_nblkptr, >=, 1);
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
ASSERT3U(dn->dn_bonuslen, <=, max_bonuslen);
ASSERT3U(dn->dn_datablksz, ==,
dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
dn->dn_bonuslen, <=, max_bonuslen);
for (i = 0; i < TXG_SIZE; i++) {
ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
}
}
if (dn->dn_phys->dn_type != DMU_OT_NONE)
ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
if (dn->dn_dbuf != NULL) {
ASSERT3P(dn->dn_phys, ==,
(dnode_phys_t *)dn->dn_dbuf->db.db_data +
(dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
}
if (drop_struct_lock)
rw_exit(&dn->dn_struct_rwlock);
}
#endif
void
dnode_byteswap(dnode_phys_t *dnp)
{
uint64_t *buf64 = (void*)&dnp->dn_blkptr;
int i;
if (dnp->dn_type == DMU_OT_NONE) {
bzero(dnp, sizeof (dnode_phys_t));
return;
}
dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
dnp->dn_extra_slots = BSWAP_8(dnp->dn_extra_slots);
dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
dnp->dn_used = BSWAP_64(dnp->dn_used);
/*
* dn_nblkptr is only one byte, so it's OK to read it in either
* byte order. We can't read dn_bonuslen.
*/
ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
buf64[i] = BSWAP_64(buf64[i]);
/*
* OK to check dn_bonuslen for zero, because it won't matter if
* we have the wrong byte order. This is necessary because the
* dnode dnode is smaller than a regular dnode.
*/
if (dnp->dn_bonuslen != 0) {
/*
* Note that the bonus length calculated here may be
* longer than the actual bonus buffer. This is because
* we always put the bonus buffer after the last block
* pointer (instead of packing it against the end of the
* dnode buffer).
*/
int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
int slots = dnp->dn_extra_slots + 1;
size_t len = DN_SLOTS_TO_BONUSLEN(slots) - off;
dmu_object_byteswap_t byteswap;
ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
byteswap = DMU_OT_BYTESWAP(dnp->dn_bonustype);
dmu_ot_byteswap[byteswap].ob_func(dnp->dn_bonus + off, len);
}
/* Swap SPILL block if we have one */
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
byteswap_uint64_array(DN_SPILL_BLKPTR(dnp), sizeof (blkptr_t));
}
void
dnode_buf_byteswap(void *vbuf, size_t size)
{
int i = 0;
ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
while (i < size) {
dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
dnode_byteswap(dnp);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE)
i += dnp->dn_extra_slots * DNODE_MIN_SIZE;
}
}
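/*
 * The walk above steps through the buffer in DNODE_MIN_SIZE units; when a
 * swapped dnode uses extra slots (large dnodes), dn_extra_slots additional
 * units are skipped so the interior slots are not misinterpreted as
 * independent dnodes.
 */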
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t));
if (newsize < dn->dn_bonuslen) {
/* clear any data after the end of the new size */
size_t diff = dn->dn_bonuslen - newsize;
char *data_end = ((char *)dn->dn_bonus->db.db_data) + newsize;
bzero(data_end, diff);
}
dn->dn_bonuslen = newsize;
if (newsize == 0)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
else
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
rw_exit(&dn->dn_struct_rwlock);
}
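/*
 * dn_next_bonuslen[] (like the other dn_next_* arrays) has one slot per
 * open txg, indexed by tx_txg & TXG_MASK.  A value of 0 means "unchanged
 * this txg", so shrinking the bonus buffer to zero is recorded with the
 * DN_ZERO_BONUSLEN sentinel instead.
 */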
void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dn->dn_bonustype = newtype;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
dnode_setdirty(dn, tx);
dn->dn_rm_spillblk[tx->tx_txg & TXG_MASK] = DN_KILL_SPILLBLK;
dn->dn_have_spill = B_FALSE;
}
static void
dnode_setdblksz(dnode_t *dn, int size)
{
ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
dn->dn_datablksz = size;
dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
}
static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
uint64_t object, dnode_handle_t *dnh)
{
dnode_t *dn;
dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
dn->dn_moved = 0;
/*
* Defer setting dn_objset until the dnode is ready to be a candidate
* for the dnode_move() callback.
*/
dn->dn_object = object;
dn->dn_dbuf = db;
dn->dn_handle = dnh;
dn->dn_phys = dnp;
if (dnp->dn_datablkszsec) {
dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
} else {
dn->dn_datablksz = 0;
dn->dn_datablkszsec = 0;
dn->dn_datablkshift = 0;
}
dn->dn_indblkshift = dnp->dn_indblkshift;
dn->dn_nlevels = dnp->dn_nlevels;
dn->dn_type = dnp->dn_type;
dn->dn_nblkptr = dnp->dn_nblkptr;
dn->dn_checksum = dnp->dn_checksum;
dn->dn_compress = dnp->dn_compress;
dn->dn_bonustype = dnp->dn_bonustype;
dn->dn_bonuslen = dnp->dn_bonuslen;
dn->dn_num_slots = dnp->dn_extra_slots + 1;
dn->dn_maxblkid = dnp->dn_maxblkid;
dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
dn->dn_id_flags = 0;
dmu_zfetch_init(&dn->dn_zfetch, dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
ASSERT(!DN_SLOT_IS_PTR(dnh->dnh_dnode));
mutex_enter(&os->os_lock);
/*
* Exclude special dnodes from os_dnodes so an empty os_dnodes
* signifies that the special dnodes have no references from
* their children (the entries in os_dnodes). This allows
* dnode_destroy() to easily determine if the last child has
* been removed and then complete eviction of the objset.
*/
if (!DMU_OBJECT_IS_SPECIAL(object))
list_insert_head(&os->os_dnodes, dn);
membar_producer();
/*
* Everything else must be valid before assigning dn_objset
* makes the dnode eligible for dnode_move().
*/
dn->dn_objset = os;
dnh->dnh_dnode = dn;
mutex_exit(&os->os_lock);
arc_space_consume(sizeof (dnode_t), ARC_SPACE_DNODE);
return (dn);
}
/*
* Caller must be holding the dnode handle, which is released upon return.
*/
static void
dnode_destroy(dnode_t *dn)
{
objset_t *os = dn->dn_objset;
boolean_t complete_os_eviction = B_FALSE;
ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
mutex_enter(&os->os_lock);
POINTER_INVALIDATE(&dn->dn_objset);
if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
list_remove(&os->os_dnodes, dn);
complete_os_eviction =
list_is_empty(&os->os_dnodes) &&
list_link_active(&os->os_evicting_node);
}
mutex_exit(&os->os_lock);
/* the dnode can no longer move, so we can release the handle */
if (!zrl_is_locked(&dn->dn_handle->dnh_zrlock))
zrl_remove(&dn->dn_handle->dnh_zrlock);
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
if (dn->dn_bonus != NULL) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
dn->dn_bonus = NULL;
}
dn->dn_zio = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dmu_zfetch_fini(&dn->dn_zfetch);
kmem_cache_free(dnode_cache, dn);
arc_space_return(sizeof (dnode_t), ARC_SPACE_DNODE);
if (complete_os_eviction)
dmu_objset_evict_done(os);
}
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
{
int i;
ASSERT3U(dn_slots, >, 0);
ASSERT3U(dn_slots << DNODE_SHIFT, <=,
spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)));
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (blocksize == 0)
blocksize = 1 << zfs_default_bs;
else
blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);
if (ibs == 0)
ibs = zfs_default_ibs;
ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d dn_slots=%d\n",
dn->dn_objset, (u_longlong_t)dn->dn_object,
(u_longlong_t)tx->tx_txg, blocksize, ibs, dn_slots);
DNODE_STAT_BUMP(dnode_allocate);
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
ASSERT(ot != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(ot));
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=, DN_SLOTS_TO_BONUSLEN(dn_slots));
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT0(dn->dn_maxblkid);
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
ASSERT(avl_is_empty(&dn->dn_dbufs));
for (i = 0; i < TXG_SIZE; i++) {
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
}
dn->dn_type = ot;
dnode_setdblksz(dn, blocksize);
dn->dn_indblkshift = ibs;
dn->dn_nlevels = 1;
dn->dn_num_slots = dn_slots;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
dn->dn_nblkptr = 1;
else {
dn->dn_nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
}
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
dn->dn_dirtyctx = 0;
dn->dn_free_txg = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_dirty_txg = 0;
dn->dn_allocated_txg = tx->tx_txg;
dn->dn_id_flags = 0;
dnode_setdirty(dn, tx);
dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
dmu_object_type_t bonustype, int bonuslen, int dn_slots,
boolean_t keep_spill, dmu_tx_t *tx)
{
int nblkptr;
ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
ASSERT0(blocksize % SPA_MINBLOCKSIZE);
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
ASSERT(tx->tx_txg != 0);
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=,
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
ASSERT3U(bonuslen, <=, DN_BONUS_SIZE(dn_slots << DNODE_SHIFT));
dnode_free_interior_slots(dn);
DNODE_STAT_BUMP(dnode_reallocate);
/* clean up any unreferenced dbufs */
dnode_evict_dbufs(dn);
dn->dn_id_flags = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_setdirty(dn, tx);
if (dn->dn_datablksz != blocksize) {
/* change blocksize */
ASSERT0(dn->dn_maxblkid);
ASSERT(BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
dnode_block_freed(dn, 0));
dnode_setdblksz(dn, blocksize);
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = blocksize;
}
if (dn->dn_bonuslen != bonuslen)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = bonuslen;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
nblkptr = 1;
else
nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
if (dn->dn_bonustype != bonustype)
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = bonustype;
if (dn->dn_nblkptr != nblkptr)
dn->dn_next_nblkptr[tx->tx_txg & TXG_MASK] = nblkptr;
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR && !keep_spill) {
dbuf_rm_spill(dn, tx);
dnode_rm_spill(dn, tx);
}
rw_exit(&dn->dn_struct_rwlock);
/* change type */
dn->dn_type = ot;
/* change bonus size and type */
mutex_enter(&dn->dn_mtx);
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_num_slots = dn_slots;
dn->dn_nblkptr = nblkptr;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
/* fix up the bonus db_size */
if (dn->dn_bonus) {
dn->dn_bonus->db.db_size =
DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
}
dn->dn_allocated_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
}
#ifdef _KERNEL
static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
int i;
ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
/* Copy fields. */
ndn->dn_objset = odn->dn_objset;
ndn->dn_object = odn->dn_object;
ndn->dn_dbuf = odn->dn_dbuf;
ndn->dn_handle = odn->dn_handle;
ndn->dn_phys = odn->dn_phys;
ndn->dn_type = odn->dn_type;
ndn->dn_bonuslen = odn->dn_bonuslen;
ndn->dn_bonustype = odn->dn_bonustype;
ndn->dn_nblkptr = odn->dn_nblkptr;
ndn->dn_checksum = odn->dn_checksum;
ndn->dn_compress = odn->dn_compress;
ndn->dn_nlevels = odn->dn_nlevels;
ndn->dn_indblkshift = odn->dn_indblkshift;
ndn->dn_datablkshift = odn->dn_datablkshift;
ndn->dn_datablkszsec = odn->dn_datablkszsec;
ndn->dn_datablksz = odn->dn_datablksz;
ndn->dn_maxblkid = odn->dn_maxblkid;
ndn->dn_num_slots = odn->dn_num_slots;
bcopy(&odn->dn_next_type[0], &ndn->dn_next_type[0],
sizeof (odn->dn_next_type));
bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
sizeof (odn->dn_next_nblkptr));
bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
sizeof (odn->dn_next_nlevels));
bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
sizeof (odn->dn_next_indblkshift));
bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
sizeof (odn->dn_next_bonustype));
bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
sizeof (odn->dn_rm_spillblk));
bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
sizeof (odn->dn_next_bonuslen));
bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
sizeof (odn->dn_next_blksz));
bcopy(&odn->dn_next_maxblkid[0], &ndn->dn_next_maxblkid[0],
sizeof (odn->dn_next_maxblkid));
for (i = 0; i < TXG_SIZE; i++) {
list_move_tail(&ndn->dn_dirty_records[i],
&odn->dn_dirty_records[i]);
}
bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0],
sizeof (odn->dn_free_ranges));
ndn->dn_allocated_txg = odn->dn_allocated_txg;
ndn->dn_free_txg = odn->dn_free_txg;
ndn->dn_assigned_txg = odn->dn_assigned_txg;
ndn->dn_dirty_txg = odn->dn_dirty_txg;
ndn->dn_dirtyctx = odn->dn_dirtyctx;
ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
ndn->dn_dbufs_count = odn->dn_dbufs_count;
ndn->dn_bonus = odn->dn_bonus;
ndn->dn_have_spill = odn->dn_have_spill;
ndn->dn_zio = odn->dn_zio;
ndn->dn_oldused = odn->dn_oldused;
ndn->dn_oldflags = odn->dn_oldflags;
ndn->dn_olduid = odn->dn_olduid;
ndn->dn_oldgid = odn->dn_oldgid;
ndn->dn_oldprojid = odn->dn_oldprojid;
ndn->dn_newuid = odn->dn_newuid;
ndn->dn_newgid = odn->dn_newgid;
ndn->dn_newprojid = odn->dn_newprojid;
ndn->dn_id_flags = odn->dn_id_flags;
dmu_zfetch_init(&ndn->dn_zfetch, ndn);
/*
* Update back pointers. Updating the handle fixes the back pointer of
* every descendant dbuf as well as the bonus dbuf.
*/
ASSERT(ndn->dn_handle->dnh_dnode == odn);
ndn->dn_handle->dnh_dnode = ndn;
/*
* Invalidate the original dnode by clearing all of its back pointers.
*/
odn->dn_dbuf = NULL;
odn->dn_handle = NULL;
avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
odn->dn_dbufs_count = 0;
odn->dn_bonus = NULL;
dmu_zfetch_fini(&odn->dn_zfetch);
/*
* Set the low bit of the objset pointer to ensure that dnode_move()
* recognizes the dnode as invalid in any subsequent callback.
*/
POINTER_INVALIDATE(&odn->dn_objset);
/*
* Satisfy the destructor.
*/
for (i = 0; i < TXG_SIZE; i++) {
list_create(&odn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
odn->dn_free_ranges[i] = NULL;
odn->dn_next_nlevels[i] = 0;
odn->dn_next_indblkshift[i] = 0;
odn->dn_next_bonustype[i] = 0;
odn->dn_rm_spillblk[i] = 0;
odn->dn_next_bonuslen[i] = 0;
odn->dn_next_blksz[i] = 0;
}
odn->dn_allocated_txg = 0;
odn->dn_free_txg = 0;
odn->dn_assigned_txg = 0;
odn->dn_dirty_txg = 0;
odn->dn_dirtyctx = 0;
odn->dn_dirtyctx_firstset = NULL;
odn->dn_have_spill = B_FALSE;
odn->dn_zio = NULL;
odn->dn_oldused = 0;
odn->dn_oldflags = 0;
odn->dn_olduid = 0;
odn->dn_oldgid = 0;
odn->dn_oldprojid = ZFS_DEFAULT_PROJID;
odn->dn_newuid = 0;
odn->dn_newgid = 0;
odn->dn_newprojid = ZFS_DEFAULT_PROJID;
odn->dn_id_flags = 0;
/*
* Mark the dnode.
*/
ndn->dn_moved = 1;
odn->dn_moved = (uint8_t)-1;
}
/*ARGSUSED*/
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
dnode_t *odn = buf, *ndn = newbuf;
objset_t *os;
int64_t refcount;
uint32_t dbufs;
/*
* The dnode is on the objset's list of known dnodes if the objset
* pointer is valid. We set the low bit of the objset pointer when
* freeing the dnode to invalidate it, and the memory patterns written
* by kmem (baddcafe and deadbeef) set at least one of the two low bits.
* A newly created dnode sets the objset pointer last of all to indicate
* that the dnode is known and in a valid state to be moved by this
* function.
*/
os = odn->dn_objset;
if (!POINTER_IS_VALID(os)) {
DNODE_STAT_BUMP(dnode_move_invalid);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* Ensure that the objset does not go away during the move.
*/
rw_enter(&os_lock, RW_WRITER);
if (os != odn->dn_objset) {
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck1);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* If the dnode is still valid, then so is the objset. We know that no
* valid objset can be freed while we hold os_lock, so we can safely
* ensure that the objset remains in use.
*/
mutex_enter(&os->os_lock);
/*
* Recheck the objset pointer in case the dnode was removed just before
* acquiring the lock.
*/
if (os != odn->dn_objset) {
mutex_exit(&os->os_lock);
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck2);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* At this point we know that as long as we hold os->os_lock, the dnode
* cannot be freed and fields within the dnode can be safely accessed.
* The objset listing this dnode cannot go away as long as this dnode is
* on its list.
*/
rw_exit(&os_lock);
if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_special);
return (KMEM_CBRC_NO);
}
ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */
/*
* Lock the dnode handle to prevent the dnode from obtaining any new
* holds. This also prevents the descendant dbufs and the bonus dbuf
* from accessing the dnode, so that we can discount their holds. The
* handle is safe to access because we know that while the dnode cannot
* go away, neither can its handle. Once we hold dnh_zrlock, we can
* safely move any dnode referenced only by dbufs.
*/
if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_handle);
return (KMEM_CBRC_LATER);
}
/*
* Ensure a consistent view of the dnode's holds and the dnode's dbufs.
* We need to guarantee that there is a hold for every dbuf in order to
* determine whether the dnode is actively referenced. Falsely matching
* a dbuf to an active hold would lead to an unsafe move. It's possible
* that a thread already having an active dnode hold is about to add a
* dbuf, and we can't compare hold and dbuf counts while the add is in
* progress.
*/
if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_rwlock);
return (KMEM_CBRC_LATER);
}
/*
* A dbuf may be removed (evicted) without an active dnode hold. In that
* case, the dbuf count is decremented under the handle lock before the
* dbuf's hold is released. This order ensures that if we count the hold
* after the dbuf is removed but before its hold is released, we will
* treat the unmatched hold as active and exit safely. If we count the
* hold before the dbuf is removed, the hold is discounted, and the
* removal is blocked until the move completes.
*/
refcount = zfs_refcount_count(&odn->dn_holds);
ASSERT(refcount >= 0);
dbufs = DN_DBUFS_COUNT(odn);
/* We can't have more dbufs than dnode holds. */
ASSERT3U(dbufs, <=, refcount);
DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
uint32_t, dbufs);
if (refcount > dbufs) {
rw_exit(&odn->dn_struct_rwlock);
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_active);
return (KMEM_CBRC_LATER);
}
rw_exit(&odn->dn_struct_rwlock);
/*
* At this point we know that anyone with a hold on the dnode is not
* actively referencing it. The dnode is known and in a valid state to
* move. We're holding the locks needed to execute the critical section.
*/
dnode_move_impl(odn, ndn);
list_link_replace(&odn->dn_link, &ndn->dn_link);
/* If the dnode was safe to move, the refcount cannot have changed. */
ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
ASSERT(dbufs == DN_DBUFS_COUNT(ndn));
zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
mutex_exit(&os->os_lock);
return (KMEM_CBRC_YES);
}
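/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * dnode_move() above is a kmem "move" callback.  A movable cache registers
 * such a callback with the slab allocator roughly as shown below; the
 * allocator then invokes it when it wants to relocate an object, and the
 * KMEM_CBRC_* return value tells it whether the move happened, should be
 * retried later, or must not be attempted for that object.
 */
static kmem_cache_t *example_dnode_cache;	/* hypothetical cache */

static void
example_register_move_callback(void)
{
	/* Create a cache of dnode-sized buffers (ctor/dtor elided here). */
	example_dnode_cache = kmem_cache_create("example_dnode_t",
	    sizeof (dnode_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	/* Wire in the move callback defined above. */
	kmem_cache_set_move(example_dnode_cache, dnode_move);
}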
#endif /* _KERNEL */
static void
dnode_slots_hold(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
zrl_add(&dnh->dnh_zrlock);
}
}
static void
dnode_slots_rele(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (zrl_is_locked(&dnh->dnh_zrlock))
zrl_exit(&dnh->dnh_zrlock);
else
zrl_remove(&dnh->dnh_zrlock);
}
}
static int
dnode_slots_tryenter(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (!zrl_tryenter(&dnh->dnh_zrlock)) {
for (int j = idx; j < i; j++) {
dnh = &children->dnc_children[j];
zrl_exit(&dnh->dnh_zrlock);
}
return (0);
}
}
return (1);
}
static void
dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnh->dnh_dnode = ptr;
}
}
static boolean_t
dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
/*
* If all dnode slots are either already free or
* evictable, return B_TRUE.
*/
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnode_t *dn = dnh->dnh_dnode;
if (dn == DN_SLOT_FREE) {
continue;
} else if (DN_SLOT_IS_PTR(dn)) {
mutex_enter(&dn->dn_mtx);
boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
zfs_refcount_is_zero(&dn->dn_holds) &&
!DNODE_IS_DIRTY(dn));
mutex_exit(&dn->dn_mtx);
if (!can_free)
return (B_FALSE);
else
continue;
} else {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void
dnode_reclaim_slots(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
ASSERT3S(dnh->dnh_dnode->dn_type, ==, DMU_OT_NONE);
dnode_destroy(dnh->dnh_dnode);
dnh->dnh_dnode = DN_SLOT_FREE;
}
}
}
void
dnode_free_interior_slots(dnode_t *dn)
{
dnode_children_t *children = dmu_buf_get_user(&dn->dn_dbuf->db);
int epb = dn->dn_dbuf->db.db_size >> DNODE_SHIFT;
int idx = (dn->dn_object & (epb - 1)) + 1;
int slots = dn->dn_num_slots - 1;
if (slots == 0)
return;
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
while (!dnode_slots_tryenter(children, idx, slots)) {
DNODE_STAT_BUMP(dnode_free_interior_lock_retry);
cond_resched();
}
dnode_set_slots(children, idx, slots, DN_SLOT_FREE);
dnode_slots_rele(children, idx, slots);
}
void
dnode_special_close(dnode_handle_t *dnh)
{
dnode_t *dn = dnh->dnh_dnode;
/*
* Ensure dnode_rele_and_unlock() has released dn_mtx, after final
* zfs_refcount_remove()
*/
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_count(&dn->dn_holds) > 0)
cv_wait(&dn->dn_nodnholds, &dn->dn_mtx);
mutex_exit(&dn->dn_mtx);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 0);
ASSERT(dn->dn_dbuf == NULL ||
dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
zrl_add(&dnh->dnh_zrlock);
dnode_destroy(dn); /* implicit zrl_remove() */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = NULL;
}
void
dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
dnode_handle_t *dnh)
{
dnode_t *dn;
zrl_init(&dnh->dnh_zrlock);
VERIFY3U(1, ==, zrl_tryenter(&dnh->dnh_zrlock));
dn = dnode_create(os, dnp, NULL, object, dnh);
DNODE_VERIFY(dn);
zrl_exit(&dnh->dnh_zrlock);
}
static void
dnode_buf_evict_async(void *dbu)
{
dnode_children_t *dnc = dbu;
DNODE_STAT_BUMP(dnode_buf_evict);
for (int i = 0; i < dnc->dnc_count; i++) {
dnode_handle_t *dnh = &dnc->dnc_children[i];
dnode_t *dn;
/*
* The dnode handle lock guards against the dnode moving to
* another valid address, so there is no need here to guard
* against changes to or from NULL.
*/
if (!DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
continue;
}
zrl_add(&dnh->dnh_zrlock);
dn = dnh->dnh_dnode;
/*
* If there are holds on this dnode, then there should
* be holds on the dnode's containing dbuf as well; thus
* it wouldn't be eligible for eviction and this function
* would not have been called.
*/
ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
dnode_destroy(dn); /* implicit zrl_remove() for first slot */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
}
kmem_free(dnc, sizeof (dnode_children_t) +
dnc->dnc_count * sizeof (dnode_handle_t));
}
/*
* When the DNODE_MUST_BE_FREE flag is set, the "slots" parameter is used
* to ensure the hole at the specified object offset is large enough to
* hold the dnode being created. The slots parameter is also used to ensure
* a dnode does not span multiple dnode blocks. In both of these cases, if
* a failure occurs, ENOSPC is returned. Keep in mind, these failure cases
* are only possible when using DNODE_MUST_BE_FREE.
*
* If the DNODE_MUST_BE_ALLOCATED flag is set, "slots" must be 0.
* dnode_hold_impl() will check if the requested dnode is already consumed
* as an extra dnode slot by a large dnode, in which case it returns
* ENOENT.
*
* If the DNODE_DRY_RUN flag is set, we don't actually hold the dnode, just
* return whether the hold would succeed or not. tag and dnp should be set to
* NULL in this case.
*
* errors:
* EINVAL - Invalid object number or flags.
* ENOSPC - Hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE)
* EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE)
* - Refers to a freeing dnode (DNODE_MUST_BE_FREE)
* - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED)
* ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED)
* - The requested dnode is being freed (DNODE_MUST_BE_ALLOCATED)
* EIO - I/O error when reading the meta dnode dbuf.
*
* Note: with DNODE_MUST_BE_FREE, the hold succeeds even for free dnodes.
*/
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
void *tag, dnode_t **dnp)
{
int epb, idx, err;
int drop_struct_lock = FALSE;
int type;
uint64_t blk;
dnode_t *mdn, *dn;
dmu_buf_impl_t *db;
dnode_children_t *dnc;
dnode_phys_t *dn_block;
dnode_handle_t *dnh;
ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0));
ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0));
IMPLY(flag & DNODE_DRY_RUN, (tag == NULL) && (dnp == NULL));
/*
* If you are holding the spa config lock as writer, you shouldn't
* be asking the DMU to do *anything* unless it's the root pool
* which may require us to read from the root filesystem while
* holding some (not all) of the locks as writer.
*/
ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
(spa_is_root(os->os_spa) &&
spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
ASSERT((flag & DNODE_MUST_BE_ALLOCATED) || (flag & DNODE_MUST_BE_FREE));
if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT ||
object == DMU_PROJECTUSED_OBJECT) {
if (object == DMU_USERUSED_OBJECT)
dn = DMU_USERUSED_DNODE(os);
else if (object == DMU_GROUPUSED_OBJECT)
dn = DMU_GROUPUSED_DNODE(os);
else
dn = DMU_PROJECTUSED_DNODE(os);
if (dn == NULL)
return (SET_ERROR(ENOENT));
type = dn->dn_type;
if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
return (SET_ERROR(ENOENT));
if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
return (SET_ERROR(EEXIST));
DNODE_VERIFY(dn);
/* Don't actually hold if dry run, just return 0 */
if (!(flag & DNODE_DRY_RUN)) {
(void) zfs_refcount_add(&dn->dn_holds, tag);
*dnp = dn;
}
return (0);
}
if (object == 0 || object >= DN_MAX_OBJECT)
return (SET_ERROR(EINVAL));
mdn = DMU_META_DNODE(os);
ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
DNODE_VERIFY(mdn);
if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
rw_enter(&mdn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));
db = dbuf_hold(mdn, blk, FTAG);
if (drop_struct_lock)
rw_exit(&mdn->dn_struct_rwlock);
if (db == NULL) {
DNODE_STAT_BUMP(dnode_hold_dbuf_hold);
return (SET_ERROR(EIO));
}
/*
* We do not need to decrypt to read the dnode so it doesn't matter
* if we get the encrypted or decrypted version.
*/
err = dbuf_read(db, NULL, DB_RF_CANFAIL |
DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
if (err) {
DNODE_STAT_BUMP(dnode_hold_dbuf_read);
dbuf_rele(db, FTAG);
return (err);
}
ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
epb = db->db.db_size >> DNODE_SHIFT;
idx = object & (epb - 1);
dn_block = (dnode_phys_t *)db->db.db_data;
ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
dnc = dmu_buf_get_user(&db->db);
dnh = NULL;
if (dnc == NULL) {
dnode_children_t *winner;
int skip = 0;
dnc = kmem_zalloc(sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t), KM_SLEEP);
dnc->dnc_count = epb;
dnh = &dnc->dnc_children[0];
/* Initialize dnode slot status from dnode_phys_t */
for (int i = 0; i < epb; i++) {
zrl_init(&dnh[i].dnh_zrlock);
if (skip) {
skip--;
continue;
}
if (dn_block[i].dn_type != DMU_OT_NONE) {
int interior = dn_block[i].dn_extra_slots;
dnode_set_slots(dnc, i, 1, DN_SLOT_ALLOCATED);
dnode_set_slots(dnc, i + 1, interior,
DN_SLOT_INTERIOR);
skip = interior;
} else {
dnh[i].dnh_dnode = DN_SLOT_FREE;
skip = 0;
}
}
dmu_buf_init_user(&dnc->dnc_dbu, NULL,
dnode_buf_evict_async, NULL);
winner = dmu_buf_set_user(&db->db, &dnc->dnc_dbu);
if (winner != NULL) {
for (int i = 0; i < epb; i++)
zrl_destroy(&dnh[i].dnh_zrlock);
kmem_free(dnc, sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t));
dnc = winner;
}
}
ASSERT(dnc->dnc_count == epb);
if (flag & DNODE_MUST_BE_ALLOCATED) {
slots = 1;
dnode_slots_hold(dnc, idx, slots);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
DNODE_STAT_BUMP(dnode_hold_alloc_interior);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
} else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
DNODE_STAT_BUMP(dnode_hold_alloc_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
} else {
dnode_slots_rele(dnc, idx, slots);
while (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_retry);
cond_resched();
}
/*
* Someone else won the race and called dnode_create()
* after we checked DN_SLOT_IS_PTR() above but before
* we acquired the lock.
*/
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses);
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
}
}
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg != 0) {
DNODE_STAT_BUMP(dnode_hold_alloc_type_none);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
}
/* Don't actually hold if dry run, just return 0 */
if (flag & DNODE_DRY_RUN) {
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (0);
}
DNODE_STAT_BUMP(dnode_hold_alloc_hits);
} else if (flag & DNODE_MUST_BE_FREE) {
if (idx + slots - 1 >= DNODES_PER_BLOCK) {
DNODE_STAT_BUMP(dnode_hold_free_overflow);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_hold(dnc, idx, slots);
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_rele(dnc, idx, slots);
while (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
cond_resched();
}
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
/*
* Allocated but otherwise free dnodes which would
* be in the interior of a multi-slot dnode need
* to be freed. Single slot dnodes can be safely
* re-purposed as a performance optimization.
*/
if (slots > 1)
dnode_reclaim_slots(dnc, idx + 1, slots - 1);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
}
mutex_enter(&dn->dn_mtx);
if (!zfs_refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
DNODE_STAT_BUMP(dnode_hold_free_refcount);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
}
/* Don't actually hold if dry run, just return 0 */
if (flag & DNODE_DRY_RUN) {
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (0);
}
dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR);
DNODE_STAT_BUMP(dnode_hold_free_hits);
} else {
dbuf_rele(db, FTAG);
return (SET_ERROR(EINVAL));
}
ASSERT0(dn->dn_free_txg);
if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
dbuf_add_ref(db, dnh);
mutex_exit(&dn->dn_mtx);
/* Now we can rely on the hold to prevent the dnode from moving. */
dnode_slots_rele(dnc, idx, slots);
DNODE_VERIFY(dn);
ASSERT3P(dnp, !=, NULL);
ASSERT3P(dn->dn_dbuf, ==, db);
ASSERT3U(dn->dn_object, ==, object);
dbuf_rele(db, FTAG);
*dnp = dn;
return (0);
}
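/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * claim "slots" contiguous free dnode slots at "object" and then drop the
 * hold, exercising the DNODE_MUST_BE_FREE contract documented above.
 * Note that slots must be > 0 when DNODE_MUST_BE_FREE is set.
 */
static int
example_claim_free_slots(objset_t *os, uint64_t object, int slots)
{
	dnode_t *dn;
	int err;

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, slots,
	    FTAG, &dn);
	if (err != 0)
		return (err);	/* e.g. ENOSPC or EEXIST, as listed above */

	/* ... the caller would allocate/initialize the dnode here ... */

	dnode_rele(dn, FTAG);
	return (0);
}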
/*
* Return a held dnode in *dnp if the object is allocated, or ENOENT if not.
*/
int
dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag,
dnp));
}
/*
* Can only add a reference if there is already at least one
* reference on the dnode. Returns FALSE if unable to add a
* new reference.
*/
boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_is_zero(&dn->dn_holds)) {
mutex_exit(&dn->dn_mtx);
return (FALSE);
}
VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
mutex_exit(&dn->dn_mtx);
return (TRUE);
}
void
dnode_rele(dnode_t *dn, void *tag)
{
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, tag, B_FALSE);
}
void
dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting)
{
uint64_t refs;
/* Get while the hold prevents the dnode from moving. */
dmu_buf_impl_t *db = dn->dn_dbuf;
dnode_handle_t *dnh = dn->dn_handle;
refs = zfs_refcount_remove(&dn->dn_holds, tag);
if (refs == 0)
cv_broadcast(&dn->dn_nodnholds);
mutex_exit(&dn->dn_mtx);
/* dnode could get destroyed at this point, so don't use it anymore */
/*
* It's unsafe to release the last hold on a dnode by dnode_rele() or
* indirectly by dbuf_rele() while relying on the dnode handle to
* prevent the dnode from moving, since releasing the last hold could
* result in the dnode's parent dbuf evicting its dnode handles. For
* that reason anyone calling dnode_rele() or dbuf_rele() without some
* other direct or indirect hold on the dnode must first drop the dnode
* handle.
*/
ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);
/* NOTE: only "special" dnodes (e.g. the meta dnode) have no dn_dbuf */
if (refs == 0 && db != NULL) {
/*
* Another thread could add a hold to the dnode handle in
* dnode_hold_impl() while holding the parent dbuf. Since the
* hold on the parent dbuf prevents the handle from being
* destroyed, the hold on the handle is OK. We can't yet assert
* that the handle has zero references, but that will be
* asserted anyway when the handle gets destroyed.
*/
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, dnh, evicting);
}
}
/*
* Test whether we can create a dnode at the specified location.
*/
int
dnode_try_claim(objset_t *os, uint64_t object, int slots)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_FREE | DNODE_DRY_RUN,
slots, NULL, NULL));
}
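/*
 * Illustrative sketch (hypothetical caller): probe whether a 2-slot (1K)
 * dnode could be created at "object".  Because dnode_try_claim() uses
 * DNODE_DRY_RUN, no hold is taken and nothing needs to be released.
 */
static boolean_t
example_can_claim_2_slots(objset_t *os, uint64_t object)
{
	return (dnode_try_claim(os, object, 2) == 0);
}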
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
uint64_t txg = tx->tx_txg;
if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
dsl_dataset_dirty(os->os_dsl_dataset, tx);
return;
}
DNODE_VERIFY(dn);
#ifdef ZFS_DEBUG
mutex_enter(&dn->dn_mtx);
ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
mutex_exit(&dn->dn_mtx);
#endif
/*
* Determine old uid/gid when necessary
*/
dmu_objset_userquota_get_ids(dn, B_TRUE, tx);
multilist_t *dirtylist = &os->os_dirty_dnodes[txg & TXG_MASK];
multilist_sublist_t *mls = multilist_sublist_lock_obj(dirtylist, dn);
/*
* If we are already marked dirty, we're done.
*/
if (multilist_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
multilist_sublist_unlock(mls);
return;
}
ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
!avl_is_empty(&dn->dn_dbufs));
ASSERT(dn->dn_datablksz != 0);
ASSERT0(dn->dn_next_bonuslen[txg & TXG_MASK]);
ASSERT0(dn->dn_next_blksz[txg & TXG_MASK]);
ASSERT0(dn->dn_next_bonustype[txg & TXG_MASK]);
dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
(u_longlong_t)dn->dn_object, (u_longlong_t)txg);
multilist_sublist_insert_head(mls, dn);
multilist_sublist_unlock(mls);
/*
* The dnode maintains a hold on its containing dbuf as
* long as there are holds on it. Each instantiated child
* dbuf maintains a hold on the dnode. When the last child
* drops its hold, the dnode will drop its hold on the
* containing dbuf. We add a "dirty hold" here so that the
* dnode will hang around after we finish processing its
* children.
*/
VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
(void) dbuf_dirty(dn->dn_dbuf, tx);
dsl_dataset_dirty(os->os_dsl_dataset, tx);
}
void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
mutex_exit(&dn->dn_mtx);
return;
}
dn->dn_free_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
}
/*
* Try to change the block size for the indicated dnode. This can only
* succeed if there are no blocks allocated or dirty beyond the first block.
*/
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int err;
ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (size == 0)
size = SPA_MINBLOCKSIZE;
else
size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
if (ibs == dn->dn_indblkshift)
ibs = 0;
if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
return (0);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
/* Check for any allocated blocks beyond the first */
if (dn->dn_maxblkid != 0)
goto fail;
mutex_enter(&dn->dn_dbufs_mtx);
for (db = avl_first(&dn->dn_dbufs); db != NULL;
db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_exit(&dn->dn_dbufs_mtx);
goto fail;
}
}
mutex_exit(&dn->dn_dbufs_mtx);
if (ibs && dn->dn_nlevels != 1)
goto fail;
/* resize the old block */
err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
if (err == 0) {
dbuf_new_size(db, size, tx);
} else if (err != ENOENT) {
goto fail;
}
dnode_setdblksz(dn, size);
dnode_setdirty(dn, tx);
dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
if (ibs) {
dn->dn_indblkshift = ibs;
dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
}
/* release after we have fixed the blocksize in the dnode */
if (db)
dbuf_rele(db, FTAG);
rw_exit(&dn->dn_struct_rwlock);
return (0);
fail:
rw_exit(&dn->dn_struct_rwlock);
return (SET_ERROR(ENOTSUP));
}
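/*
 * Illustrative sketch (hypothetical caller, assumes a held dnode and an
 * assigned transaction): grow an object's data block size to 128K while
 * leaving the indirect block shift unchanged (ibs == 0).  Per the comment
 * above, this fails with ENOTSUP once data exists beyond the first block.
 */
static int
example_grow_blocksize(dnode_t *dn, dmu_tx_t *tx)
{
	return (dnode_set_blksz(dn, 128 * 1024, 0, tx));
}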
static void
dnode_set_nlevels_impl(dnode_t *dn, int new_nlevels, dmu_tx_t *tx)
{
uint64_t txgoff = tx->tx_txg & TXG_MASK;
int old_nlevels = dn->dn_nlevels;
dmu_buf_impl_t *db;
list_t *list;
dbuf_dirty_record_t *new, *dr, *dr_next;
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT3U(new_nlevels, >, dn->dn_nlevels);
dn->dn_nlevels = new_nlevels;
ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
dn->dn_next_nlevels[txgoff] = new_nlevels;
/* dirty the left indirects */
db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
ASSERT(db != NULL);
new = dbuf_dirty(db, tx);
dbuf_rele(db, FTAG);
/* transfer the dirty records to the new indirect */
mutex_enter(&dn->dn_mtx);
mutex_enter(&new->dt.di.dr_mtx);
list = &dn->dn_dirty_records[txgoff];
for (dr = list_head(list); dr; dr = dr_next) {
dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
IMPLY(dr->dr_dbuf == NULL, old_nlevels == 1);
if (dr->dr_dbuf == NULL ||
(dr->dr_dbuf->db_level == old_nlevels - 1 &&
dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID)) {
list_remove(&dn->dn_dirty_records[txgoff], dr);
list_insert_tail(&new->dt.di.dr_children, dr);
dr->dr_parent = new;
}
}
mutex_exit(&new->dt.di.dr_mtx);
mutex_exit(&dn->dn_mtx);
}
int
dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx)
{
int ret = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_nlevels == nlevels) {
ret = 0;
goto out;
} else if (nlevels < dn->dn_nlevels) {
ret = SET_ERROR(EINVAL);
goto out;
}
dnode_set_nlevels_impl(dn, nlevels, tx);
out:
rw_exit(&dn->dn_struct_rwlock);
return (ret);
}
/* read-holding callers must not rely on the lock being continuously held */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read,
boolean_t force)
{
int epbs, new_nlevels;
uint64_t sz;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(have_read ?
RW_READ_HELD(&dn->dn_struct_rwlock) :
RW_WRITE_HELD(&dn->dn_struct_rwlock));
/*
* if we have a read-lock, check to see if we need to do any work
* before upgrading to a write-lock.
*/
if (have_read) {
if (blkid <= dn->dn_maxblkid)
return;
if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
rw_exit(&dn->dn_struct_rwlock);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
}
}
/*
* Raw sends (indicated by the force flag) require that we take the
* given blkid even if the value is lower than the current value.
*/
if (!force && blkid <= dn->dn_maxblkid)
goto out;
/*
* We use the (otherwise unused) top bit of dn_next_maxblkid[txgoff]
* to indicate that this field is set. This allows us to set the
* maxblkid to 0 on an existing object in dnode_sync().
*/
dn->dn_maxblkid = blkid;
dn->dn_next_maxblkid[tx->tx_txg & TXG_MASK] =
blkid | DMU_NEXT_MAXBLKID_SET;
/*
* Compute the number of levels necessary to support the new maxblkid.
* Raw sends will ensure nlevels is set correctly for us.
*/
new_nlevels = 1;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
for (sz = dn->dn_nblkptr;
sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
new_nlevels++;
ASSERT3U(new_nlevels, <=, DN_MAX_LEVELS);
if (!force) {
if (new_nlevels > dn->dn_nlevels)
dnode_set_nlevels_impl(dn, new_nlevels, tx);
} else {
ASSERT3U(dn->dn_nlevels, >=, new_nlevels);
}
out:
if (have_read)
rw_downgrade(&dn->dn_struct_rwlock);
}
static void
dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
if (db != NULL) {
dmu_buf_will_dirty(&db->db, tx);
dbuf_rele(db, FTAG);
}
}
/*
* Dirty all the in-core level-1 dbufs in the range specified by start_blkid
* and end_blkid.
*/
static void
dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db;
avl_index_t where;
db_search = kmem_zalloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
mutex_enter(&dn->dn_dbufs_mtx);
db_search->db_level = 1;
db_search->db_blkid = start_blkid + 1;
db_search->db_state = DB_SEARCH;
for (;;) {
db = avl_find(&dn->dn_dbufs, db_search, &where);
if (db == NULL)
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
if (db == NULL || db->db_level != 1 ||
db->db_blkid >= end_blkid) {
break;
}
/*
* Setup the next blkid we want to search for.
*/
db_search->db_blkid = db->db_blkid + 1;
ASSERT3U(db->db_blkid, >=, start_blkid);
/*
* If the dbuf transitions to DB_EVICTING while we're trying
* to dirty it, then we will be unable to discover it in
* the dbuf hash table. This will result in a call to
* dbuf_create() which needs to acquire the dn_dbufs_mtx
* lock. To avoid a deadlock, we drop the lock before
* dirtying the level-1 dbuf.
*/
mutex_exit(&dn->dn_dbufs_mtx);
dnode_dirty_l1(dn, db->db_blkid, tx);
mutex_enter(&dn->dn_dbufs_mtx);
}
#ifdef ZFS_DEBUG
/*
* Walk all the in-core level-1 dbufs and verify they have been dirtied.
*/
db_search->db_level = 1;
db_search->db_blkid = start_blkid + 1;
db_search->db_state = DB_SEARCH;
db = avl_find(&dn->dn_dbufs, db_search, &where);
if (db == NULL)
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_level != 1 || db->db_blkid >= end_blkid)
break;
if (db->db_state != DB_EVICTING)
ASSERT(db->db_dirtycnt > 0);
}
#endif
kmem_free(db_search, sizeof (dmu_buf_impl_t));
mutex_exit(&dn->dn_dbufs_mtx);
}
void
dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, void *tag)
{
/*
* Don't set dirtyctx to SYNC if we're just modifying this as we
* initialize the objset.
*/
if (dn->dn_dirtyctx == DN_UNDIRTIED) {
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
if (ds != NULL) {
rrw_enter(&ds->ds_bp_rwlock, RW_READER, tag);
}
if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
if (dmu_tx_is_syncing(tx))
dn->dn_dirtyctx = DN_DIRTY_SYNC;
else
dn->dn_dirtyctx = DN_DIRTY_OPEN;
dn->dn_dirtyctx_firstset = tag;
}
if (ds != NULL) {
rrw_exit(&ds->ds_bp_rwlock, tag);
}
}
}
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
uint64_t blkoff, blkid, nblks;
int blksz, blkshift, head, tail;
int trunc = FALSE;
int epbs;
blksz = dn->dn_datablksz;
blkshift = dn->dn_datablkshift;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
if (len == DMU_OBJECT_END) {
len = UINT64_MAX - off;
trunc = TRUE;
}
/*
* First, block align the region to free:
*/
if (ISP2(blksz)) {
head = P2NPHASE(off, blksz);
blkoff = P2PHASE(off, blksz);
if ((off >> blkshift) > dn->dn_maxblkid)
return;
} else {
ASSERT(dn->dn_maxblkid == 0);
if (off == 0 && len >= blksz) {
/*
* Freeing the whole block; fast-track this request.
*/
blkid = 0;
nblks = 1;
if (dn->dn_nlevels > 1) {
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_dirty_l1(dn, 0, tx);
rw_exit(&dn->dn_struct_rwlock);
}
goto done;
} else if (off >= blksz) {
/* Freeing past end-of-data */
return;
} else {
/* Freeing part of the block. */
head = blksz - off;
ASSERT3U(head, >, 0);
}
blkoff = off;
}
/* zero out any partial block data at the start of the range */
if (head) {
int res;
ASSERT3U(blkoff + head, ==, blksz);
if (len < head)
head = len;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
res = dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off),
TRUE, FALSE, FTAG, &db);
rw_exit(&dn->dn_struct_rwlock);
if (res == 0) {
caddr_t data;
boolean_t dirty;
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER,
FTAG);
/* don't dirty if it isn't on disk and isn't dirty */
dirty = !list_is_empty(&db->db_dirty_records) ||
(db->db_blkptr && !BP_IS_HOLE(db->db_blkptr));
dmu_buf_unlock_parent(db, dblt, FTAG);
if (dirty) {
dmu_buf_will_dirty(&db->db, tx);
data = db->db.db_data;
bzero(data + blkoff, head);
}
dbuf_rele(db, FTAG);
}
off += head;
len -= head;
}
/* If the range was less than one block, we're done */
if (len == 0)
return;
/* If the remaining range is past end of file, we're done */
if ((off >> blkshift) > dn->dn_maxblkid)
return;
ASSERT(ISP2(blksz));
if (trunc)
tail = 0;
else
tail = P2PHASE(len, blksz);
ASSERT0(P2PHASE(off, blksz));
/* zero out any partial block data at the end of the range */
if (tail) {
int res;
if (len < tail)
tail = len;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
res = dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off+len),
TRUE, FALSE, FTAG, &db);
rw_exit(&dn->dn_struct_rwlock);
if (res == 0) {
boolean_t dirty;
/* don't dirty if not on disk and not dirty */
db_lock_type_t type = dmu_buf_lock_parent(db, RW_READER,
FTAG);
dirty = !list_is_empty(&db->db_dirty_records) ||
(db->db_blkptr && !BP_IS_HOLE(db->db_blkptr));
dmu_buf_unlock_parent(db, type, FTAG);
if (dirty) {
dmu_buf_will_dirty(&db->db, tx);
bzero(db->db.db_data, tail);
}
dbuf_rele(db, FTAG);
}
len -= tail;
}
/* If the range did not include a full block, we are done */
if (len == 0)
return;
ASSERT(IS_P2ALIGNED(off, blksz));
ASSERT(trunc || IS_P2ALIGNED(len, blksz));
blkid = off >> blkshift;
nblks = len >> blkshift;
if (trunc)
nblks += 1;
/*
* Dirty all the indirect blocks in this range. Note that only
* the first and last indirect blocks can actually be written
* (if they were partially freed) -- they must be dirtied, even if
* they do not exist on disk yet. The interior blocks will
* be freed by free_children(), so they will not actually be written.
* Even though these interior blocks will not be written, we
* dirty them for two reasons:
*
* - It ensures that the indirect blocks remain in memory until
* syncing context. (They have already been prefetched by
* dmu_tx_hold_free(), so we don't have to worry about reading
* them serially here.)
*
* - The dirty space accounting will put pressure on the txg sync
* mechanism to begin syncing, and to delay transactions if there
* is a large amount of freeing. Even though these indirect
* blocks will not be written, we could need to write the same
* amount of space if we copy the freed BPs into deadlists.
*/
if (dn->dn_nlevels > 1) {
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
uint64_t first, last;
first = blkid >> epbs;
dnode_dirty_l1(dn, first, tx);
if (trunc)
last = dn->dn_maxblkid >> epbs;
else
last = (blkid + nblks - 1) >> epbs;
if (last != first)
dnode_dirty_l1(dn, last, tx);
dnode_dirty_l1range(dn, first, last, tx);
int shift = dn->dn_datablkshift + dn->dn_indblkshift -
SPA_BLKPTRSHIFT;
for (uint64_t i = first + 1; i < last; i++) {
/*
* Set i to the blockid of the next non-hole
* level-1 indirect block at or after i. Note
* that dnode_next_offset() operates in terms of
* level-0-equivalent bytes.
*/
uint64_t ibyte = i << shift;
int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&ibyte, 2, 1, 0);
i = ibyte >> shift;
if (i >= last)
break;
/*
* Normally we should not see an error, either
* from dnode_next_offset() or dbuf_hold_level()
* (except for ESRCH from dnode_next_offset).
* If there is an i/o error, then when we read
* this block in syncing context, it will use
* ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
* to the "failmode" property. dnode_next_offset()
* doesn't have a flag to indicate MUSTSUCCEED.
*/
if (err != 0)
break;
dnode_dirty_l1(dn, i, tx);
}
rw_exit(&dn->dn_struct_rwlock);
}
done:
/*
* Add this range to the dnode range list.
* We will finish up this free operation in the syncing phase.
*/
mutex_enter(&dn->dn_mtx);
{
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] == NULL) {
dn->dn_free_ranges[txgoff] = range_tree_create(NULL,
RANGE_SEG64, NULL, 0, 0);
}
range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
}
dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
(u_longlong_t)blkid, (u_longlong_t)nblks,
(u_longlong_t)tx->tx_txg);
mutex_exit(&dn->dn_mtx);
dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
dnode_setdirty(dn, tx);
}
static boolean_t
dnode_spill_freed(dnode_t *dn)
{
int i;
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
uint64_t
dnode_block_freed(dnode_t *dn, uint64_t blkid)
{
void *dp = spa_get_dsl(dn->dn_objset->os_spa);
int i;
if (blkid == DMU_BONUS_BLKID)
return (FALSE);
/*
* If we're in the process of opening the pool, dp will not be
* set yet, but there shouldn't be anything dirty.
*/
if (dp == NULL)
return (FALSE);
if (dn->dn_free_txg)
return (TRUE);
if (blkid == DMU_SPILL_BLKID)
return (dnode_spill_freed(dn));
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_free_ranges[i] != NULL &&
range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* call from syncing context when we actually write/free space for this dnode */
void
dnode_diduse_space(dnode_t *dn, int64_t delta)
{
uint64_t space;
dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
dn, dn->dn_phys,
(u_longlong_t)dn->dn_phys->dn_used,
(longlong_t)delta);
mutex_enter(&dn->dn_mtx);
space = DN_USED_BYTES(dn->dn_phys);
if (delta > 0) {
ASSERT3U(space + delta, >=, space); /* no overflow */
} else {
ASSERT3U(space, >=, -delta); /* no underflow */
}
space += delta;
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
dn->dn_phys->dn_used = space >> DEV_BSHIFT;
} else {
dn->dn_phys->dn_used = space;
dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
}
mutex_exit(&dn->dn_mtx);
}
/*
* Scans a block at the indicated "level" looking for a hole or data,
* depending on 'flags'.
*
* If level > 0, then we are scanning an indirect block looking at its
* pointers. If level == 0, then we are looking at a block of dnodes.
*
* If we don't find what we are looking for in the block, we return ESRCH.
* Otherwise, return with *offset pointing to the beginning (if searching
* forwards) or end (if searching backwards) of the range covered by the
* block pointer we matched on (or dnode).
*
* The basic search algorithm used below by dnode_next_offset() is to
* use this function to search up the block tree (widen the search) until
* we find something (i.e., we don't return ESRCH) and then search back
* down the tree (narrow the search) until we reach our original search
* level.
*/
static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
int lvl, uint64_t blkfill, uint64_t txg)
{
dmu_buf_impl_t *db = NULL;
void *data = NULL;
uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
uint64_t epb = 1ULL << epbs;
uint64_t minfill, maxfill;
boolean_t hole;
int i, inc, error, span;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
hole = ((flags & DNODE_FIND_HOLE) != 0);
inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
ASSERT(txg == 0 || !hole);
if (lvl == dn->dn_phys->dn_nlevels) {
error = 0;
epb = dn->dn_phys->dn_nblkptr;
data = dn->dn_phys->dn_blkptr;
} else {
uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
if (error) {
if (error != ENOENT)
return (error);
if (hole)
return (0);
/*
* This can only happen when we are searching up
* the block tree for data. We don't really need to
* adjust the offset, as we will just end up looking
* at the pointer to this block in its parent, and it's
* going to be unallocated, so we will skip over it.
*/
return (SET_ERROR(ESRCH));
}
error = dbuf_read(db, NULL,
DB_RF_CANFAIL | DB_RF_HAVESTRUCT |
DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
if (error) {
dbuf_rele(db, FTAG);
return (error);
}
data = db->db.db_data;
rw_enter(&db->db_rwlock, RW_READER);
}
if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
db->db_blkptr->blk_birth <= txg ||
BP_IS_HOLE(db->db_blkptr))) {
/*
* This can only happen when we are searching up the tree
* and these conditions mean that we need to keep climbing.
*/
error = SET_ERROR(ESRCH);
} else if (lvl == 0) {
dnode_phys_t *dnp = data;
ASSERT(dn->dn_type == DMU_OT_DNODE);
ASSERT(!(flags & DNODE_FIND_BACKWARDS));
for (i = (*offset >> DNODE_SHIFT) & (blkfill - 1);
i < blkfill; i += dnp[i].dn_extra_slots + 1) {
if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
break;
}
if (i == blkfill)
error = SET_ERROR(ESRCH);
*offset = (*offset & ~(DNODE_BLOCK_SIZE - 1)) +
(i << DNODE_SHIFT);
} else {
blkptr_t *bp = data;
uint64_t start = *offset;
span = (lvl - 1) * epbs + dn->dn_datablkshift;
minfill = 0;
maxfill = blkfill << ((lvl - 1) * epbs);
if (hole)
maxfill--;
else
minfill++;
if (span >= 8 * sizeof (*offset)) {
/* This only happens on the highest indirection level */
ASSERT3U((lvl - 1), ==, dn->dn_phys->dn_nlevels - 1);
*offset = 0;
} else {
*offset = *offset >> span;
}
for (i = BF64_GET(*offset, 0, epbs);
i >= 0 && i < epb; i += inc) {
if (BP_GET_FILL(&bp[i]) >= minfill &&
BP_GET_FILL(&bp[i]) <= maxfill &&
(hole || bp[i].blk_birth > txg))
break;
if (inc > 0 || *offset > 0)
*offset += inc;
}
if (span >= 8 * sizeof (*offset)) {
*offset = start;
} else {
*offset = *offset << span;
}
if (inc < 0) {
/* traversing backwards; position offset at the end */
ASSERT3U(*offset, <=, start);
*offset = MIN(*offset + (1ULL << span) - 1, start);
} else if (*offset < start) {
*offset = start;
}
if (i < 0 || i >= epb)
error = SET_ERROR(ESRCH);
}
if (db != NULL) {
rw_exit(&db->db_rwlock);
dbuf_rele(db, FTAG);
}
return (error);
}
/*
* Find the next hole, data, or sparse region at or after *offset.
* The value 'blkfill' tells us how many items we expect to find
* in an L0 data block; this value is 1 for normal objects,
* DNODES_PER_BLOCK for the meta dnode, and some fraction of
* DNODES_PER_BLOCK when searching for sparse regions thereof.
*
* Examples:
*
* dnode_next_offset(dn, flags, offset, 1, 1, 0);
* Finds the next/previous hole/data in a file.
* Used in dmu_offset_next().
*
* dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
* Finds the next free/allocated dnode in an objset's meta-dnode.
* Only finds objects that have new contents since txg (i.e.
* bonus buffer changes and content removal are ignored).
* Used in dmu_object_next().
*
* dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
* Finds the next L2 meta-dnode bp that's at most 1/4 full.
* Used in dmu_object_alloc().
*/
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
int minlvl, uint64_t blkfill, uint64_t txg)
{
uint64_t initial_offset = *offset;
int lvl, maxlvl;
int error = 0;
if (!(flags & DNODE_FIND_HAVELOCK))
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_phys->dn_nlevels == 0) {
error = SET_ERROR(ESRCH);
goto out;
}
if (dn->dn_datablkshift == 0) {
if (*offset < dn->dn_datablksz) {
if (flags & DNODE_FIND_HOLE)
*offset = dn->dn_datablksz;
} else {
error = SET_ERROR(ESRCH);
}
goto out;
}
maxlvl = dn->dn_phys->dn_nlevels;
for (lvl = minlvl; lvl <= maxlvl; lvl++) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
if (error != ESRCH)
break;
}
while (error == 0 && --lvl >= minlvl) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
}
/*
* There's always a "virtual hole" at the end of the object, even
* if all BP's which physically exist are non-holes.
*/
if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
error = 0;
}
if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
initial_offset < *offset : initial_offset > *offset))
error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);
return (error);
}
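/*
 * Illustrative sketch (hypothetical helper): advance *off to the next hole
 * at or after its current value, mirroring the first example in the comment
 * above dnode_next_offset().  The caller is assumed to hold the dnode but
 * not dn_struct_rwlock.
 */
static int
example_find_next_hole(dnode_t *dn, uint64_t *off)
{
	return (dnode_next_offset(dn, DNODE_FIND_HOLE, off, 1, 1, 0));
}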
#if defined(_KERNEL)
EXPORT_SYMBOL(dnode_hold);
EXPORT_SYMBOL(dnode_rele);
EXPORT_SYMBOL(dnode_set_nlevels);
EXPORT_SYMBOL(dnode_set_blksz);
EXPORT_SYMBOL(dnode_free_range);
EXPORT_SYMBOL(dnode_evict_dbufs);
EXPORT_SYMBOL(dnode_evict_bonus);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dsl_prop.c b/sys/contrib/openzfs/module/zfs/dsl_prop.c
index f6ff9ae47192..0787fcdad9b4 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_prop.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_prop.c
@@ -1,1287 +1,1287 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright 2019 Joyent, Inc.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include "zfs_prop.h"
#define ZPROP_INHERIT_SUFFIX "$inherit"
#define ZPROP_RECVD_SUFFIX "$recvd"
static int
dodefault(zfs_prop_t prop, int intsz, int numints, void *buf)
{
/*
* The setonce properties are read-only, BUT they still
* have a default value that can be used as the initial
* value.
*/
if (prop == ZPROP_INVAL ||
(zfs_prop_readonly(prop) && !zfs_prop_setonce(prop)))
return (SET_ERROR(ENOENT));
if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
if (intsz != 1)
return (SET_ERROR(EOVERFLOW));
(void) strncpy(buf, zfs_prop_default_string(prop),
numints);
} else {
if (intsz != 8 || numints < 1)
return (SET_ERROR(EOVERFLOW));
*(uint64_t *)buf = zfs_prop_default_numeric(prop);
}
return (0);
}
int
dsl_prop_get_dd(dsl_dir_t *dd, const char *propname,
int intsz, int numints, void *buf, char *setpoint, boolean_t snapshot)
{
int err;
dsl_dir_t *target = dd;
objset_t *mos = dd->dd_pool->dp_meta_objset;
zfs_prop_t prop;
boolean_t inheritable;
boolean_t inheriting = B_FALSE;
char *inheritstr;
char *recvdstr;
ASSERT(dsl_pool_config_held(dd->dd_pool));
if (setpoint)
setpoint[0] = '\0';
prop = zfs_name_to_prop(propname);
inheritable = (prop == ZPROP_INVAL || zfs_prop_inheritable(prop));
inheritstr = kmem_asprintf("%s%s", propname, ZPROP_INHERIT_SUFFIX);
recvdstr = kmem_asprintf("%s%s", propname, ZPROP_RECVD_SUFFIX);
/*
* Note: dd may become NULL, therefore we shouldn't dereference it
* after this loop.
*/
for (; dd != NULL; dd = dd->dd_parent) {
if (dd != target || snapshot) {
if (!inheritable) {
err = SET_ERROR(ENOENT);
break;
}
inheriting = B_TRUE;
}
/* Check for a local value. */
err = zap_lookup(mos, dsl_dir_phys(dd)->dd_props_zapobj,
propname, intsz, numints, buf);
if (err != ENOENT) {
if (setpoint != NULL && err == 0)
dsl_dir_name(dd, setpoint);
break;
}
/*
* Skip the check for a received value if there is an explicit
* inheritance entry.
*/
err = zap_contains(mos, dsl_dir_phys(dd)->dd_props_zapobj,
inheritstr);
if (err != 0 && err != ENOENT)
break;
if (err == ENOENT) {
/* Check for a received value. */
err = zap_lookup(mos, dsl_dir_phys(dd)->dd_props_zapobj,
recvdstr, intsz, numints, buf);
if (err != ENOENT) {
if (setpoint != NULL && err == 0) {
if (inheriting) {
dsl_dir_name(dd, setpoint);
} else {
(void) strlcpy(setpoint,
ZPROP_SOURCE_VAL_RECVD,
MAXNAMELEN);
}
}
break;
}
}
/*
* If we found an explicit inheritance entry, err is zero even
* though we haven't yet found the value, so reinitializing err
* at the end of the loop (instead of at the beginning) ensures
* that err has a valid post-loop value.
*/
err = SET_ERROR(ENOENT);
}
if (err == ENOENT)
err = dodefault(prop, intsz, numints, buf);
kmem_strfree(inheritstr);
kmem_strfree(recvdstr);
return (err);
}
int
dsl_prop_get_ds(dsl_dataset_t *ds, const char *propname,
int intsz, int numints, void *buf, char *setpoint)
{
zfs_prop_t prop = zfs_name_to_prop(propname);
boolean_t inheritable;
uint64_t zapobj;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
inheritable = (prop == ZPROP_INVAL || zfs_prop_inheritable(prop));
zapobj = dsl_dataset_phys(ds)->ds_props_obj;
if (zapobj != 0) {
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
int err;
ASSERT(ds->ds_is_snapshot);
/* Check for a local value. */
err = zap_lookup(mos, zapobj, propname, intsz, numints, buf);
if (err != ENOENT) {
if (setpoint != NULL && err == 0)
dsl_dataset_name(ds, setpoint);
return (err);
}
/*
* Skip the check for a received value if there is an explicit
* inheritance entry.
*/
if (inheritable) {
char *inheritstr = kmem_asprintf("%s%s", propname,
ZPROP_INHERIT_SUFFIX);
err = zap_contains(mos, zapobj, inheritstr);
kmem_strfree(inheritstr);
if (err != 0 && err != ENOENT)
return (err);
}
if (err == ENOENT) {
/* Check for a received value. */
char *recvdstr = kmem_asprintf("%s%s", propname,
ZPROP_RECVD_SUFFIX);
err = zap_lookup(mos, zapobj, recvdstr,
intsz, numints, buf);
kmem_strfree(recvdstr);
if (err != ENOENT) {
if (setpoint != NULL && err == 0)
(void) strlcpy(setpoint,
ZPROP_SOURCE_VAL_RECVD,
MAXNAMELEN);
return (err);
}
}
}
return (dsl_prop_get_dd(ds->ds_dir, propname,
intsz, numints, buf, setpoint, ds->ds_is_snapshot));
}
static dsl_prop_record_t *
dsl_prop_record_find(dsl_dir_t *dd, const char *propname)
{
dsl_prop_record_t *pr = NULL;
ASSERT(MUTEX_HELD(&dd->dd_lock));
for (pr = list_head(&dd->dd_props);
pr != NULL; pr = list_next(&dd->dd_props, pr)) {
if (strcmp(pr->pr_propname, propname) == 0)
break;
}
return (pr);
}
static dsl_prop_record_t *
dsl_prop_record_create(dsl_dir_t *dd, const char *propname)
{
dsl_prop_record_t *pr;
ASSERT(MUTEX_HELD(&dd->dd_lock));
pr = kmem_alloc(sizeof (dsl_prop_record_t), KM_SLEEP);
pr->pr_propname = spa_strdup(propname);
list_create(&pr->pr_cbs, sizeof (dsl_prop_cb_record_t),
offsetof(dsl_prop_cb_record_t, cbr_pr_node));
list_insert_head(&dd->dd_props, pr);
return (pr);
}
void
dsl_prop_init(dsl_dir_t *dd)
{
list_create(&dd->dd_props, sizeof (dsl_prop_record_t),
offsetof(dsl_prop_record_t, pr_node));
}
void
dsl_prop_fini(dsl_dir_t *dd)
{
dsl_prop_record_t *pr;
while ((pr = list_remove_head(&dd->dd_props)) != NULL) {
list_destroy(&pr->pr_cbs);
spa_strfree((char *)pr->pr_propname);
kmem_free(pr, sizeof (dsl_prop_record_t));
}
list_destroy(&dd->dd_props);
}
/*
* Register interest in the named property. We'll call the callback
* once to notify it of the current property value, and again each time
* the property changes, until this callback is unregistered.
*
* Return 0 on success, or an errno if the prop is not an integer value.
*/
int
dsl_prop_register(dsl_dataset_t *ds, const char *propname,
dsl_prop_changed_cb_t *callback, void *cbarg)
{
dsl_dir_t *dd = ds->ds_dir;
uint64_t value;
dsl_prop_record_t *pr;
dsl_prop_cb_record_t *cbr;
int err;
dsl_pool_t *dp __maybe_unused = dd->dd_pool;
ASSERT(dsl_pool_config_held(dp));
err = dsl_prop_get_int_ds(ds, propname, &value);
if (err != 0)
return (err);
cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_SLEEP);
cbr->cbr_ds = ds;
cbr->cbr_func = callback;
cbr->cbr_arg = cbarg;
mutex_enter(&dd->dd_lock);
pr = dsl_prop_record_find(dd, propname);
if (pr == NULL)
pr = dsl_prop_record_create(dd, propname);
cbr->cbr_pr = pr;
list_insert_head(&pr->pr_cbs, cbr);
list_insert_head(&ds->ds_prop_cbs, cbr);
mutex_exit(&dd->dd_lock);
cbr->cbr_func(cbr->cbr_arg, value);
return (0);
}
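/*
 * Illustrative sketch (hypothetical consumer, not part of the original
 * file): a property-changed callback of the shape dsl_prop_register()
 * expects, plus its registration.  "example_state_t" and its field are
 * placeholders; the caller must hold the pool config lock, per the
 * ASSERT above.
 */
typedef struct example_state {
	uint64_t es_recordsize;
} example_state_t;

static void
example_recordsize_changed_cb(void *arg, uint64_t newval)
{
	example_state_t *es = arg;

	es->es_recordsize = newval;
}

static int
example_register_recordsize_cb(dsl_dataset_t *ds, example_state_t *es)
{
	return (dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
	    example_recordsize_changed_cb, es));
}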
int
dsl_prop_get(const char *dsname, const char *propname,
int intsz, int numints, void *buf, char *setpoint)
{
objset_t *os;
int error;
error = dmu_objset_hold(dsname, FTAG, &os);
if (error != 0)
return (error);
error = dsl_prop_get_ds(dmu_objset_ds(os), propname,
intsz, numints, buf, setpoint);
dmu_objset_rele(os, FTAG);
return (error);
}
/*
* Get the current property value. It may have changed by the time this
* function returns, so it is NOT safe to follow up with
* dsl_prop_register() and assume that the value has not changed in
* between.
*
* Return 0 on success, ENOENT if ddname is invalid.
*/
int
dsl_prop_get_integer(const char *ddname, const char *propname,
uint64_t *valuep, char *setpoint)
{
return (dsl_prop_get(ddname, propname, 8, 1, valuep, setpoint));
}
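/*
 * Illustrative sketch (hypothetical caller): look up the effective
 * "recordsize" of a dataset and, optionally, where it was set.
 * "pool/fs" is a placeholder dataset name; setpoint may be NULL if the
 * source is not needed.
 */
static int
example_get_recordsize(uint64_t *valp, char *setpoint)
{
	return (dsl_prop_get_integer("pool/fs",
	    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), valp, setpoint));
}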
int
dsl_prop_get_int_ds(dsl_dataset_t *ds, const char *propname,
uint64_t *valuep)
{
return (dsl_prop_get_ds(ds, propname, 8, 1, valuep, NULL));
}
/*
* Predict the effective value of the given special property if it were set with
* the given value and source. This is not a general purpose function. It exists
* only to handle the special requirements of the quota and reservation
* properties. The fact that these properties are non-inheritable greatly
* simplifies the prediction logic.
*
* Returns 0 on success, a positive error code on failure, or -1 if called with
* a property not handled by this function.
*/
int
dsl_prop_predict(dsl_dir_t *dd, const char *propname,
zprop_source_t source, uint64_t value, uint64_t *newvalp)
{
zfs_prop_t prop = zfs_name_to_prop(propname);
objset_t *mos;
uint64_t zapobj;
uint64_t version;
char *recvdstr;
int err = 0;
switch (prop) {
case ZFS_PROP_QUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_REFRESERVATION:
break;
default:
return (-1);
}
mos = dd->dd_pool->dp_meta_objset;
zapobj = dsl_dir_phys(dd)->dd_props_zapobj;
recvdstr = kmem_asprintf("%s%s", propname, ZPROP_RECVD_SUFFIX);
version = spa_version(dd->dd_pool->dp_spa);
if (version < SPA_VERSION_RECVD_PROPS) {
if (source & ZPROP_SRC_NONE)
source = ZPROP_SRC_NONE;
else if (source & ZPROP_SRC_RECEIVED)
source = ZPROP_SRC_LOCAL;
}
switch ((int)source) {
case ZPROP_SRC_NONE:
/* Revert to the received value, if any. */
err = zap_lookup(mos, zapobj, recvdstr, 8, 1, newvalp);
if (err == ENOENT)
*newvalp = 0;
break;
case ZPROP_SRC_LOCAL:
*newvalp = value;
break;
case ZPROP_SRC_RECEIVED:
/*
* If there's no local setting, then the new received value will
* be the effective value.
*/
err = zap_lookup(mos, zapobj, propname, 8, 1, newvalp);
if (err == ENOENT)
*newvalp = value;
break;
case (ZPROP_SRC_NONE | ZPROP_SRC_RECEIVED):
/*
* We're clearing the received value, so the local setting (if
* it exists) remains the effective value.
*/
err = zap_lookup(mos, zapobj, propname, 8, 1, newvalp);
if (err == ENOENT)
*newvalp = 0;
break;
default:
panic("unexpected property source: %d", source);
}
kmem_strfree(recvdstr);
if (err == ENOENT)
return (0);
return (err);
}
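/*
 * Illustrative sketch (hypothetical caller): predict what the effective
 * quota would become if the received quota value were cleared, i.e. the
 * (ZPROP_SRC_NONE | ZPROP_SRC_RECEIVED) case handled above.
 */
static int
example_predict_quota_after_clear(dsl_dir_t *dd, uint64_t *newvalp)
{
	return (dsl_prop_predict(dd, zfs_prop_to_name(ZFS_PROP_QUOTA),
	    ZPROP_SRC_NONE | ZPROP_SRC_RECEIVED, 0, newvalp));
}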
/*
* Unregister this callback. Return 0 on success, ENOENT if ddname is
* invalid, or ENOMSG if no matching callback is registered.
*
* NOTE: This function is no longer used internally but has been preserved
* to prevent breaking external consumers (Lustre, etc).
*/
int
dsl_prop_unregister(dsl_dataset_t *ds, const char *propname,
dsl_prop_changed_cb_t *callback, void *cbarg)
{
dsl_dir_t *dd = ds->ds_dir;
dsl_prop_cb_record_t *cbr;
mutex_enter(&dd->dd_lock);
for (cbr = list_head(&ds->ds_prop_cbs);
cbr; cbr = list_next(&ds->ds_prop_cbs, cbr)) {
if (cbr->cbr_ds == ds &&
cbr->cbr_func == callback &&
cbr->cbr_arg == cbarg &&
strcmp(cbr->cbr_pr->pr_propname, propname) == 0)
break;
}
if (cbr == NULL) {
mutex_exit(&dd->dd_lock);
return (SET_ERROR(ENOMSG));
}
list_remove(&ds->ds_prop_cbs, cbr);
list_remove(&cbr->cbr_pr->pr_cbs, cbr);
mutex_exit(&dd->dd_lock);
kmem_free(cbr, sizeof (dsl_prop_cb_record_t));
return (0);
}
/*
* Unregister all callbacks that are registered with the
* given callback argument.
*/
void
dsl_prop_unregister_all(dsl_dataset_t *ds, void *cbarg)
{
dsl_prop_cb_record_t *cbr, *next_cbr;
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
next_cbr = list_head(&ds->ds_prop_cbs);
while (next_cbr != NULL) {
cbr = next_cbr;
next_cbr = list_next(&ds->ds_prop_cbs, cbr);
if (cbr->cbr_arg == cbarg) {
list_remove(&ds->ds_prop_cbs, cbr);
list_remove(&cbr->cbr_pr->pr_cbs, cbr);
kmem_free(cbr, sizeof (dsl_prop_cb_record_t));
}
}
mutex_exit(&dd->dd_lock);
}
boolean_t
dsl_prop_hascb(dsl_dataset_t *ds)
{
return (!list_is_empty(&ds->ds_prop_cbs));
}
/* ARGSUSED */
static int
dsl_prop_notify_all_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
dsl_dir_t *dd = ds->ds_dir;
dsl_prop_record_t *pr;
dsl_prop_cb_record_t *cbr;
mutex_enter(&dd->dd_lock);
for (pr = list_head(&dd->dd_props);
pr; pr = list_next(&dd->dd_props, pr)) {
for (cbr = list_head(&pr->pr_cbs); cbr;
cbr = list_next(&pr->pr_cbs, cbr)) {
uint64_t value;
/*
* Callback entries do not have holds on their
* datasets so that datasets with registered
* callbacks are still eligible for eviction.
* Unlike operations to update properties on a
* single dataset, we are performing a recursive
* descent of related head datasets. The caller
* of this function only has a dataset hold on
* the passed in head dataset, not the snapshots
* associated with this dataset. Without a hold,
* the dataset pointer within callback records
* for snapshots can be invalidated by eviction
* at any time.
*
* Use dsl_dataset_try_add_ref() to verify
* that the dataset for a snapshot has not
* begun eviction processing and to prevent
* eviction from occurring for the duration of
* the callback. If the hold attempt fails,
* this object is already being evicted and the
* callback can be safely ignored.
*/
if (ds != cbr->cbr_ds &&
!dsl_dataset_try_add_ref(dp, cbr->cbr_ds, FTAG))
continue;
if (dsl_prop_get_ds(cbr->cbr_ds,
cbr->cbr_pr->pr_propname, sizeof (value), 1,
&value, NULL) == 0)
cbr->cbr_func(cbr->cbr_arg, value);
if (ds != cbr->cbr_ds)
dsl_dataset_rele(cbr->cbr_ds, FTAG);
}
}
mutex_exit(&dd->dd_lock);
return (0);
}
/*
* Update all property values for ddobj & its descendants. This is used
* when renaming the dir.
*/
void
dsl_prop_notify_all(dsl_dir_t *dd)
{
dsl_pool_t *dp = dd->dd_pool;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
(void) dmu_objset_find_dp(dp, dd->dd_object, dsl_prop_notify_all_cb,
NULL, DS_FIND_CHILDREN);
}
static void
dsl_prop_changed_notify(dsl_pool_t *dp, uint64_t ddobj,
const char *propname, uint64_t value, int first)
{
dsl_dir_t *dd;
dsl_prop_record_t *pr;
dsl_prop_cb_record_t *cbr;
objset_t *mos = dp->dp_meta_objset;
zap_cursor_t zc;
zap_attribute_t *za;
int err;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
err = dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd);
if (err)
return;
if (!first) {
/*
* If the prop is set here, then this change is not
* being inherited here or below; stop the recursion.
*/
err = zap_contains(mos, dsl_dir_phys(dd)->dd_props_zapobj,
propname);
if (err == 0) {
dsl_dir_rele(dd, FTAG);
return;
}
ASSERT3U(err, ==, ENOENT);
}
mutex_enter(&dd->dd_lock);
pr = dsl_prop_record_find(dd, propname);
if (pr != NULL) {
for (cbr = list_head(&pr->pr_cbs); cbr;
cbr = list_next(&pr->pr_cbs, cbr)) {
uint64_t propobj;
/*
* cbr->cbr_ds may be invalidated due to eviction,
* requiring the use of dsl_dataset_try_add_ref().
* See comment block in dsl_prop_notify_all_cb()
* for details.
*/
if (!dsl_dataset_try_add_ref(dp, cbr->cbr_ds, FTAG))
continue;
propobj = dsl_dataset_phys(cbr->cbr_ds)->ds_props_obj;
/*
* If the property is not set on this ds, then it is
* inherited here; call the callback.
*/
if (propobj == 0 ||
zap_contains(mos, propobj, propname) != 0)
cbr->cbr_func(cbr->cbr_arg, value);
dsl_dataset_rele(cbr->cbr_ds, FTAG);
}
}
mutex_exit(&dd->dd_lock);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
for (zap_cursor_init(&zc, mos,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
dsl_prop_changed_notify(dp, za->za_first_integer,
propname, value, FALSE);
}
kmem_free(za, sizeof (zap_attribute_t));
zap_cursor_fini(&zc);
dsl_dir_rele(dd, FTAG);
}
void
dsl_prop_set_sync_impl(dsl_dataset_t *ds, const char *propname,
zprop_source_t source, int intsz, int numints, const void *value,
dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t zapobj, intval, dummy, count;
int isint;
char valbuf[32];
const char *valstr = NULL;
char *inheritstr;
char *recvdstr;
char *tbuf = NULL;
int err;
uint64_t version = spa_version(ds->ds_dir->dd_pool->dp_spa);
isint = (dodefault(zfs_name_to_prop(propname), 8, 1, &intval) == 0);
if (ds->ds_is_snapshot) {
ASSERT(version >= SPA_VERSION_SNAP_PROPS);
if (dsl_dataset_phys(ds)->ds_props_obj == 0 &&
(source & ZPROP_SRC_NONE) == 0) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_props_obj =
zap_create(mos,
DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
}
zapobj = dsl_dataset_phys(ds)->ds_props_obj;
} else {
zapobj = dsl_dir_phys(ds->ds_dir)->dd_props_zapobj;
}
/* If we are removing objects from a non-existent ZAP just return */
if (zapobj == 0)
return;
if (version < SPA_VERSION_RECVD_PROPS) {
if (source & ZPROP_SRC_NONE)
source = ZPROP_SRC_NONE;
else if (source & ZPROP_SRC_RECEIVED)
source = ZPROP_SRC_LOCAL;
}
inheritstr = kmem_asprintf("%s%s", propname, ZPROP_INHERIT_SUFFIX);
recvdstr = kmem_asprintf("%s%s", propname, ZPROP_RECVD_SUFFIX);
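/*
 * Roughly, the property ZAP holds up to three keys per property:
 * "propname" for a local value, "propname" ZPROP_INHERIT_SUFFIX as an
 * explicit-inherit marker, and "propname" ZPROP_RECVD_SUFFIX for the
 * received value. Each case below adds or removes some combination of
 * these keys for the requested source.
 */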
switch ((int)source) {
case ZPROP_SRC_NONE:
/*
* revert to received value, if any (inherit -S)
* - remove propname
* - remove propname$inherit
*/
err = zap_remove(mos, zapobj, propname, tx);
ASSERT(err == 0 || err == ENOENT);
err = zap_remove(mos, zapobj, inheritstr, tx);
ASSERT(err == 0 || err == ENOENT);
break;
case ZPROP_SRC_LOCAL:
/*
* remove propname$inherit
* set propname -> value
*/
err = zap_remove(mos, zapobj, inheritstr, tx);
ASSERT(err == 0 || err == ENOENT);
VERIFY0(zap_update(mos, zapobj, propname,
intsz, numints, value, tx));
break;
case ZPROP_SRC_INHERITED:
/*
* explicitly inherit
* - remove propname
* - set propname$inherit
*/
err = zap_remove(mos, zapobj, propname, tx);
ASSERT(err == 0 || err == ENOENT);
if (version >= SPA_VERSION_RECVD_PROPS &&
dsl_prop_get_int_ds(ds, ZPROP_HAS_RECVD, &dummy) == 0) {
dummy = 0;
VERIFY0(zap_update(mos, zapobj, inheritstr,
8, 1, &dummy, tx));
}
break;
case ZPROP_SRC_RECEIVED:
/*
* set propname$recvd -> value
*/
err = zap_update(mos, zapobj, recvdstr,
intsz, numints, value, tx);
ASSERT(err == 0);
break;
case (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED):
/*
* clear local and received settings
* - remove propname
* - remove propname$inherit
* - remove propname$recvd
*/
err = zap_remove(mos, zapobj, propname, tx);
ASSERT(err == 0 || err == ENOENT);
err = zap_remove(mos, zapobj, inheritstr, tx);
ASSERT(err == 0 || err == ENOENT);
- /* FALLTHRU */
+ /* FALLTHROUGH */
case (ZPROP_SRC_NONE | ZPROP_SRC_RECEIVED):
/*
* remove propname$recvd
*/
err = zap_remove(mos, zapobj, recvdstr, tx);
ASSERT(err == 0 || err == ENOENT);
break;
default:
cmn_err(CE_PANIC, "unexpected property source: %d", source);
}
kmem_strfree(inheritstr);
kmem_strfree(recvdstr);
/*
* If we are left with an empty snap zap we can destroy it.
* This will prevent unnecessary calls to zap_lookup() in
* the "zfs list" and "zfs get" code paths.
*/
if (ds->ds_is_snapshot &&
zap_count(mos, zapobj, &count) == 0 && count == 0) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_props_obj = 0;
zap_destroy(mos, zapobj, tx);
}
if (isint) {
VERIFY0(dsl_prop_get_int_ds(ds, propname, &intval));
if (ds->ds_is_snapshot) {
dsl_prop_cb_record_t *cbr;
/*
* It's a snapshot; nothing can inherit this
* property, so just look for callbacks on this
* ds here.
*/
mutex_enter(&ds->ds_dir->dd_lock);
for (cbr = list_head(&ds->ds_prop_cbs); cbr;
cbr = list_next(&ds->ds_prop_cbs, cbr)) {
if (strcmp(cbr->cbr_pr->pr_propname,
propname) == 0)
cbr->cbr_func(cbr->cbr_arg, intval);
}
mutex_exit(&ds->ds_dir->dd_lock);
} else {
dsl_prop_changed_notify(ds->ds_dir->dd_pool,
ds->ds_dir->dd_object, propname, intval, TRUE);
}
(void) snprintf(valbuf, sizeof (valbuf),
"%lld", (longlong_t)intval);
valstr = valbuf;
} else {
if (source == ZPROP_SRC_LOCAL) {
valstr = value;
} else {
tbuf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
if (dsl_prop_get_ds(ds, propname, 1,
ZAP_MAXVALUELEN, tbuf, NULL) == 0)
valstr = tbuf;
}
}
spa_history_log_internal_ds(ds, (source == ZPROP_SRC_NONE ||
source == ZPROP_SRC_INHERITED) ? "inherit" : "set", tx,
"%s=%s", propname, (valstr == NULL ? "" : valstr));
if (tbuf != NULL)
kmem_free(tbuf, ZAP_MAXVALUELEN);
}
int
dsl_prop_set_int(const char *dsname, const char *propname,
zprop_source_t source, uint64_t value)
{
nvlist_t *nvl = fnvlist_alloc();
int error;
fnvlist_add_uint64(nvl, propname, value);
error = dsl_props_set(dsname, source, nvl);
fnvlist_free(nvl);
return (error);
}
int
dsl_prop_set_string(const char *dsname, const char *propname,
zprop_source_t source, const char *value)
{
nvlist_t *nvl = fnvlist_alloc();
int error;
fnvlist_add_string(nvl, propname, value);
error = dsl_props_set(dsname, source, nvl);
fnvlist_free(nvl);
return (error);
}
int
dsl_prop_inherit(const char *dsname, const char *propname,
zprop_source_t source)
{
nvlist_t *nvl = fnvlist_alloc();
int error;
fnvlist_add_boolean(nvl, propname);
error = dsl_props_set(dsname, source, nvl);
fnvlist_free(nvl);
return (error);
}
int
dsl_props_set_check(void *arg, dmu_tx_t *tx)
{
dsl_props_set_arg_t *dpsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t version;
nvpair_t *elem = NULL;
int err;
err = dsl_dataset_hold(dp, dpsa->dpsa_dsname, FTAG, &ds);
if (err != 0)
return (err);
version = spa_version(ds->ds_dir->dd_pool->dp_spa);
while ((elem = nvlist_next_nvpair(dpsa->dpsa_props, elem)) != NULL) {
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENAMETOOLONG));
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
char *valstr = fnvpair_value_string(elem);
if (strlen(valstr) >= (version <
SPA_VERSION_STMF_PROP ?
ZAP_OLDMAXVALUELEN : ZAP_MAXVALUELEN)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(E2BIG));
}
}
}
if (ds->ds_is_snapshot && version < SPA_VERSION_SNAP_PROPS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_props_set_sync_impl(dsl_dataset_t *ds, zprop_source_t source,
nvlist_t *props, dmu_tx_t *tx)
{
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
nvpair_t *pair = elem;
const char *name = nvpair_name(pair);
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
/*
* This usually happens when we reuse the nvlist_t data
* returned by the counterpart dsl_prop_get_all_impl().
* For instance we do this to restore the original
* received properties when an error occurs in the
* zfs_ioc_recv() codepath.
*/
nvlist_t *attrs = fnvpair_value_nvlist(pair);
pair = fnvlist_lookup_nvpair(attrs, ZPROP_VALUE);
}
if (nvpair_type(pair) == DATA_TYPE_STRING) {
const char *value = fnvpair_value_string(pair);
dsl_prop_set_sync_impl(ds, name,
source, 1, strlen(value) + 1, value, tx);
} else if (nvpair_type(pair) == DATA_TYPE_UINT64) {
uint64_t intval = fnvpair_value_uint64(pair);
dsl_prop_set_sync_impl(ds, name,
source, sizeof (intval), 1, &intval, tx);
} else if (nvpair_type(pair) == DATA_TYPE_BOOLEAN) {
dsl_prop_set_sync_impl(ds, name,
source, 0, 0, NULL, tx);
} else {
panic("invalid nvpair type");
}
}
}
void
dsl_props_set_sync(void *arg, dmu_tx_t *tx)
{
dsl_props_set_arg_t *dpsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold(dp, dpsa->dpsa_dsname, FTAG, &ds));
dsl_props_set_sync_impl(ds, dpsa->dpsa_source, dpsa->dpsa_props, tx);
dsl_dataset_rele(ds, FTAG);
}
/*
* All-or-nothing; if any prop can't be set, nothing will be modified.
*/
int
dsl_props_set(const char *dsname, zprop_source_t source, nvlist_t *props)
{
dsl_props_set_arg_t dpsa;
int nblks = 0;
dpsa.dpsa_dsname = dsname;
dpsa.dpsa_source = source;
dpsa.dpsa_props = props;
/*
* If the source includes NONE, then we will only be removing entries
* from the ZAP object. In that case don't check for ENOSPC.
*/
if ((source & ZPROP_SRC_NONE) == 0)
nblks = 2 * fnvlist_num_pairs(props);
return (dsl_sync_task(dsname, dsl_props_set_check, dsl_props_set_sync,
&dpsa, nblks, ZFS_SPACE_CHECK_RESERVED));
}
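/*
 * Minimal usage sketch (dataset and property names hypothetical),
 * mirroring what dsl_prop_set_int() above does for a single property
 * but setting two at once, all-or-nothing:
 *
 * nvlist_t *nvl = fnvlist_alloc();
 * fnvlist_add_uint64(nvl, "quota", 1ULL << 30);
 * fnvlist_add_uint64(nvl, "reservation", 1ULL << 20);
 * int err = dsl_props_set("pool/fs", ZPROP_SRC_LOCAL, nvl);
 * fnvlist_free(nvl);
 */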
typedef enum dsl_prop_getflags {
DSL_PROP_GET_INHERITING = 0x1, /* searching parent of target ds */
DSL_PROP_GET_SNAPSHOT = 0x2, /* snapshot dataset */
DSL_PROP_GET_LOCAL = 0x4, /* local properties */
DSL_PROP_GET_RECEIVED = 0x8, /* received properties */
} dsl_prop_getflags_t;
static int
dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
const char *setpoint, dsl_prop_getflags_t flags, nvlist_t *nv)
{
zap_cursor_t zc;
zap_attribute_t za;
int err = 0;
for (zap_cursor_init(&zc, mos, propobj);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
nvlist_t *propval;
zfs_prop_t prop;
char buf[ZAP_MAXNAMELEN];
char *valstr;
const char *suffix;
const char *propname;
const char *source;
suffix = strchr(za.za_name, '$');
if (suffix == NULL) {
/*
* Skip local properties if we only want received
* properties.
*/
if (flags & DSL_PROP_GET_RECEIVED)
continue;
propname = za.za_name;
source = setpoint;
} else if (strcmp(suffix, ZPROP_INHERIT_SUFFIX) == 0) {
/* Skip explicitly inherited entries. */
continue;
} else if (strcmp(suffix, ZPROP_RECVD_SUFFIX) == 0) {
if (flags & DSL_PROP_GET_LOCAL)
continue;
(void) strncpy(buf, za.za_name, (suffix - za.za_name));
buf[suffix - za.za_name] = '\0';
propname = buf;
if (!(flags & DSL_PROP_GET_RECEIVED)) {
/* Skip if locally overridden. */
err = zap_contains(mos, propobj, propname);
if (err == 0)
continue;
if (err != ENOENT)
break;
/* Skip if explicitly inherited. */
valstr = kmem_asprintf("%s%s", propname,
ZPROP_INHERIT_SUFFIX);
err = zap_contains(mos, propobj, valstr);
kmem_strfree(valstr);
if (err == 0)
continue;
if (err != ENOENT)
break;
}
source = ((flags & DSL_PROP_GET_INHERITING) ?
setpoint : ZPROP_SOURCE_VAL_RECVD);
} else {
/*
* For backward compatibility, skip suffixes we don't
* recognize.
*/
continue;
}
prop = zfs_name_to_prop(propname);
/* Skip non-inheritable properties. */
if ((flags & DSL_PROP_GET_INHERITING) && prop != ZPROP_INVAL &&
!zfs_prop_inheritable(prop))
continue;
/* Skip properties not valid for this type. */
if ((flags & DSL_PROP_GET_SNAPSHOT) && prop != ZPROP_INVAL &&
!zfs_prop_valid_for_type(prop, ZFS_TYPE_SNAPSHOT, B_FALSE))
continue;
/* Skip properties already defined. */
if (nvlist_exists(nv, propname))
continue;
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (za.za_integer_length == 1) {
/*
* String property
*/
char *tmp = kmem_alloc(za.za_num_integers,
KM_SLEEP);
err = zap_lookup(mos, propobj,
za.za_name, 1, za.za_num_integers, tmp);
if (err != 0) {
kmem_free(tmp, za.za_num_integers);
break;
}
VERIFY(nvlist_add_string(propval, ZPROP_VALUE,
tmp) == 0);
kmem_free(tmp, za.za_num_integers);
} else {
/*
* Integer property
*/
ASSERT(za.za_integer_length == 8);
(void) nvlist_add_uint64(propval, ZPROP_VALUE,
za.za_first_integer);
}
VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, source) == 0);
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
nvlist_free(propval);
}
zap_cursor_fini(&zc);
if (err == ENOENT)
err = 0;
return (err);
}
/*
* Iterate over all properties for this dataset and return them in an nvlist.
*/
static int
dsl_prop_get_all_ds(dsl_dataset_t *ds, nvlist_t **nvp,
dsl_prop_getflags_t flags)
{
dsl_dir_t *dd = ds->ds_dir;
dsl_pool_t *dp = dd->dd_pool;
objset_t *mos = dp->dp_meta_objset;
int err = 0;
char setpoint[ZFS_MAX_DATASET_NAME_LEN];
VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (ds->ds_is_snapshot)
flags |= DSL_PROP_GET_SNAPSHOT;
ASSERT(dsl_pool_config_held(dp));
if (dsl_dataset_phys(ds)->ds_props_obj != 0) {
ASSERT(flags & DSL_PROP_GET_SNAPSHOT);
dsl_dataset_name(ds, setpoint);
err = dsl_prop_get_all_impl(mos,
dsl_dataset_phys(ds)->ds_props_obj, setpoint, flags, *nvp);
if (err)
goto out;
}
for (; dd != NULL; dd = dd->dd_parent) {
if (dd != ds->ds_dir || (flags & DSL_PROP_GET_SNAPSHOT)) {
if (flags & (DSL_PROP_GET_LOCAL |
DSL_PROP_GET_RECEIVED))
break;
flags |= DSL_PROP_GET_INHERITING;
}
dsl_dir_name(dd, setpoint);
err = dsl_prop_get_all_impl(mos,
dsl_dir_phys(dd)->dd_props_zapobj, setpoint, flags, *nvp);
if (err)
break;
}
out:
if (err) {
nvlist_free(*nvp);
*nvp = NULL;
}
return (err);
}
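/*
 * In short: for a snapshot we first gather its own ds_props_obj, then
 * walk the dsl_dir chain from ds_dir up through dd_parent. Ancestor
 * dirs (and, for snapshots, the dataset's own dir) are scanned with
 * DSL_PROP_GET_INHERITING set, so non-inheritable properties are
 * skipped, while DSL_PROP_GET_LOCAL/RECEIVED stop the walk before any
 * ancestor dirs are visited.
 */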
boolean_t
dsl_prop_get_hasrecvd(const char *dsname)
{
uint64_t dummy;
return (0 ==
dsl_prop_get_integer(dsname, ZPROP_HAS_RECVD, &dummy, NULL));
}
static int
dsl_prop_set_hasrecvd_impl(const char *dsname, zprop_source_t source)
{
uint64_t version;
spa_t *spa;
int error = 0;
VERIFY0(spa_open(dsname, &spa, FTAG));
version = spa_version(spa);
spa_close(spa, FTAG);
if (version >= SPA_VERSION_RECVD_PROPS)
error = dsl_prop_set_int(dsname, ZPROP_HAS_RECVD, source, 0);
return (error);
}
/*
* Call after successfully receiving properties to ensure that only the first
* receive on or after SPA_VERSION_RECVD_PROPS blows away local properties.
*/
int
dsl_prop_set_hasrecvd(const char *dsname)
{
int error = 0;
if (!dsl_prop_get_hasrecvd(dsname))
error = dsl_prop_set_hasrecvd_impl(dsname, ZPROP_SRC_LOCAL);
return (error);
}
void
dsl_prop_unset_hasrecvd(const char *dsname)
{
VERIFY0(dsl_prop_set_hasrecvd_impl(dsname, ZPROP_SRC_NONE));
}
int
dsl_prop_get_all(objset_t *os, nvlist_t **nvp)
{
return (dsl_prop_get_all_ds(os->os_dsl_dataset, nvp, 0));
}
int
dsl_prop_get_received(const char *dsname, nvlist_t **nvp)
{
objset_t *os;
int error;
/*
* Received properties are not distinguishable from local properties
* until the dataset has received properties on or after
* SPA_VERSION_RECVD_PROPS.
*/
dsl_prop_getflags_t flags = (dsl_prop_get_hasrecvd(dsname) ?
DSL_PROP_GET_RECEIVED : DSL_PROP_GET_LOCAL);
error = dmu_objset_hold(dsname, FTAG, &os);
if (error != 0)
return (error);
error = dsl_prop_get_all_ds(os->os_dsl_dataset, nvp, flags);
dmu_objset_rele(os, FTAG);
return (error);
}
void
dsl_prop_nvlist_add_uint64(nvlist_t *nv, zfs_prop_t prop, uint64_t value)
{
nvlist_t *propval;
const char *propname = zfs_prop_to_name(prop);
uint64_t default_value;
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
return;
}
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
/* Indicate the default source if we can. */
if (dodefault(prop, 8, 1, &default_value) == 0 &&
value == default_value) {
VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, "") == 0);
}
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
nvlist_free(propval);
}
void
dsl_prop_nvlist_add_string(nvlist_t *nv, zfs_prop_t prop, const char *value)
{
nvlist_t *propval;
const char *propname = zfs_prop_to_name(prop);
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
return;
}
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
nvlist_free(propval);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_prop_register);
EXPORT_SYMBOL(dsl_prop_unregister);
EXPORT_SYMBOL(dsl_prop_unregister_all);
EXPORT_SYMBOL(dsl_prop_get);
EXPORT_SYMBOL(dsl_prop_get_integer);
EXPORT_SYMBOL(dsl_prop_get_all);
EXPORT_SYMBOL(dsl_prop_get_received);
EXPORT_SYMBOL(dsl_prop_get_ds);
EXPORT_SYMBOL(dsl_prop_get_int_ds);
EXPORT_SYMBOL(dsl_prop_get_dd);
EXPORT_SYMBOL(dsl_props_set);
EXPORT_SYMBOL(dsl_prop_set_int);
EXPORT_SYMBOL(dsl_prop_set_string);
EXPORT_SYMBOL(dsl_prop_inherit);
EXPORT_SYMBOL(dsl_prop_predict);
EXPORT_SYMBOL(dsl_prop_nvlist_add_uint64);
EXPORT_SYMBOL(dsl_prop_nvlist_add_string);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/metaslab.c b/sys/contrib/openzfs/module/zfs/metaslab.c
index 93d409ceb433..df0d83327c0b 100644
--- a/sys/contrib/openzfs/module/zfs/metaslab.c
+++ b/sys/contrib/openzfs/module/zfs/metaslab.c
@@ -1,6250 +1,6257 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>
#define WITH_DF_BLOCK_ALLOCATOR
#define GANG_ALLOCATION(flags) \
((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
/*
* Metaslab granularity, in bytes. This is roughly similar to what would be
* referred to as the "stripe size" in traditional RAID arrays. In normal
* operation, we will try to write this amount of data to a top-level vdev
* before moving on to the next one.
*/
unsigned long metaslab_aliquot = 512 << 10;
/*
* For testing, make some blocks above a certain size be gang blocks.
*/
unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
/*
* In pools where the log space map feature is not enabled we touch
* multiple metaslabs (and their respective space maps) with each
* transaction group. Thus, we benefit from having a small space map
* block size since it allows us to issue more I/O operations scattered
* around the disk. So a sane default for the space map block size
* is 8~16K.
*/
int zfs_metaslab_sm_blksz_no_log = (1 << 14);
/*
* When the log space map feature is enabled, we accumulate a lot of
* changes per metaslab that are flushed once in a while so we benefit
* from a bigger block size like 128K for the metaslab space maps.
*/
int zfs_metaslab_sm_blksz_with_log = (1 << 17);
/*
* The in-core space map representation is more compact than its on-disk form.
* The zfs_condense_pct determines how much more compact the in-core
* space map representation must be before we compact it on-disk.
* Values should be greater than or equal to 100.
*/
int zfs_condense_pct = 200;
/*
* Condensing a metaslab is not guaranteed to actually reduce the amount of
* space used on disk. In particular, a space map uses data in increments of
* MAX(1 << ashift, space_map_blksz), so a metaslab might use the
* same number of blocks after condensing. Since the goal of condensing is to
* reduce the number of IOPs required to read the space map, we only want to
* condense when we can be sure we will reduce the number of blocks used by the
* space map. Unfortunately, we cannot precisely compute whether or not this is
* the case in metaslab_should_condense since we are holding ms_lock. Instead,
* we apply the following heuristic: do not condense a spacemap unless the
* uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
* blocks.
*/
int zfs_metaslab_condense_block_threshold = 4;
/*
* The zfs_mg_noalloc_threshold defines which metaslab groups should
* be eligible for allocation. The value is defined as a percentage of
* free space. Metaslab groups that have more free space than
* zfs_mg_noalloc_threshold are always eligible for allocations. Once
* a metaslab group's free space is less than or equal to the
* zfs_mg_noalloc_threshold the allocator will avoid allocating to that
* group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
* Once all groups in the pool reach zfs_mg_noalloc_threshold then all
* groups are allowed to accept allocations. Gang blocks are always
* eligible to allocate on any metaslab group. The default value of 0 means
* no metaslab group will be excluded based on this criterion.
*/
int zfs_mg_noalloc_threshold = 0;
/*
* Metaslab groups are considered eligible for allocations if their
* fragmentation metric (measured as a percentage) is less than or
* equal to zfs_mg_fragmentation_threshold. If a metaslab group
* exceeds this threshold then it will be skipped unless all metaslab
* groups within the metaslab class have also crossed this threshold.
*
* This tunable was introduced to avoid edge cases where we continue
* allocating from very fragmented disks in our pool while other, less
* fragmented disks exist. On the other hand, if all disks in the
* pool are uniformly approaching the threshold, the threshold can
* be a speed bump in performance, where we keep switching the disks
* that we allocate from (e.g. we allocate some segments from disk A,
* pushing it past the threshold, while frees on disk B bring its
* fragmentation back below the threshold).
*
* Empirically, we've seen that our vdev selection for allocations is
* good enough that fragmentation increases uniformly across all vdevs
* the majority of the time. Thus we set the threshold percentage high
* enough to avoid hitting the speed bump on pools that are being pushed
* to the edge.
*/
int zfs_mg_fragmentation_threshold = 95;
/*
* Allow metaslabs to keep their active state as long as their fragmentation
* percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
* active metaslab that exceeds this threshold will no longer keep its active
* status allowing better metaslabs to be selected.
*/
int zfs_metaslab_fragmentation_threshold = 70;
/*
* When set will load all metaslabs when pool is first opened.
*/
int metaslab_debug_load = 0;
/*
* When set will prevent metaslabs from being unloaded.
*/
int metaslab_debug_unload = 0;
/*
* Minimum size which forces the dynamic allocator to change
* its allocation strategy. Once the space map cannot satisfy
* an allocation of this size, it switches to a more
* aggressive strategy (i.e. search by size rather than offset).
*/
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
/*
* The minimum free space, in percent, which must be available
* in a space map to continue allocations in a first-fit fashion.
* Once the space map's free space drops below this level we dynamically
* switch to using best-fit allocations.
*/
int metaslab_df_free_pct = 4;
/*
* Maximum distance to search forward from the last offset. Without this
* limit, fragmented pools can see >100,000 iterations and
* metaslab_block_picker() becomes the performance limiting factor on
* high-performance storage.
*
* With the default setting of 16MB, we typically see less than 500
* iterations, even with very fragmented, ashift=9 pools. The maximum number
* of iterations possible is:
* metaslab_df_max_search / (2 * (1<<ashift))
* With the default setting of 16MB this is 16*1024 (with ashift=9) or
* 2048 (with ashift=12).
*/
int metaslab_df_max_search = 16 * 1024 * 1024;
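/*
 * E.g. the worst-case iteration count from the comment above is
 * simply:
 *
 * max_iters = metaslab_df_max_search / (2 * (1ULL << ashift));
 *
 * which for the 16MB default evaluates to 16384 with ashift=9 and
 * 2048 with ashift=12.
 */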
/*
* Forces the metaslab_block_picker function to search for at least this many
* segments forwards until giving up on finding a segment that the allocation
* will fit into.
*/
uint32_t metaslab_min_search_count = 100;
/*
* If we are not searching forward (due to metaslab_df_max_search,
* metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
* controls what segment is used. If it is set, we will use the largest free
* segment. If it is not set, we will use a segment of exactly the requested
* size (or larger).
*/
int metaslab_df_use_largest_segment = B_FALSE;
/*
* Percentage of all cpus that can be used by the metaslab taskq.
*/
int metaslab_load_pct = 50;
/*
* These tunables control how long a metaslab will remain loaded after the
* last allocation from it. A metaslab can't be unloaded until at least
* metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
* have elapsed. However, zfs_metaslab_mem_limit may cause it to be
* unloaded sooner. These settings are intended to be generous -- to keep
* metaslabs loaded for a long time, reducing the rate of metaslab loading.
*/
int metaslab_unload_delay = 32;
int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
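/*
 * Concretely, metaslab_class_evict_old() below only evicts a metaslab
 * once both conditions hold (a sketch of its check):
 *
 * txg > msp->ms_selected_txg + metaslab_unload_delay &&
 * gethrtime() > msp->ms_selected_time +
 *     (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)
 */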
/*
* Max number of metaslabs per group to preload.
*/
int metaslab_preload_limit = 10;
/*
* Enable/disable preloading of metaslabs.
*/
int metaslab_preload_enabled = B_TRUE;
/*
* Enable/disable fragmentation weighting on metaslabs.
*/
int metaslab_fragmentation_factor_enabled = B_TRUE;
/*
* Enable/disable lba weighting (i.e. outer tracks are given preference).
*/
int metaslab_lba_weighting_enabled = B_TRUE;
/*
* Enable/disable metaslab group biasing.
*/
int metaslab_bias_enabled = B_TRUE;
/*
* Enable/disable remapping of indirect DVAs to their concrete vdevs.
*/
boolean_t zfs_remap_blkptr_enable = B_TRUE;
/*
* Enable/disable segment-based metaslab selection.
*/
int zfs_metaslab_segment_weight_enabled = B_TRUE;
/*
* When using segment-based metaslab selection, we will continue
* allocating from the active metaslab until we have exhausted
* zfs_metaslab_switch_threshold of its buckets.
*/
int zfs_metaslab_switch_threshold = 2;
/*
* Internal switch to enable/disable the metaslab allocation tracing
* facility.
*/
boolean_t metaslab_trace_enabled = B_FALSE;
/*
* Maximum entries that the metaslab allocation tracing facility will keep
* in a given list when running in non-debug mode. We limit the number
* of entries in non-debug mode to prevent us from using up too much memory.
* The limit should be sufficiently large that we don't expect any allocation
* to ever exceed this value. In debug mode, the system will panic if this
* limit is ever reached, allowing for further investigation.
*/
uint64_t metaslab_trace_max_entries = 5000;
/*
* Maximum number of metaslabs per group that can be disabled
* simultaneously.
*/
int max_disabled_ms = 3;
/*
* Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
* To avoid 64-bit overflow, don't set above UINT32_MAX.
*/
unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */
/*
* Maximum percentage of memory to use on storing loaded metaslabs. If loading
* a metaslab would take it over this percentage, the oldest selected metaslab
* is automatically unloaded.
*/
int zfs_metaslab_mem_limit = 25;
/*
* Force the per-metaslab range trees to use 64-bit integers to store
* segments. Used for debugging purposes.
*/
boolean_t zfs_metaslab_force_large_segs = B_FALSE;
/*
* By default we only store segments over a certain size in the size-sorted
* metaslab trees (ms_allocatable_by_size and
* ms_unflushed_frees_by_size). This dramatically reduces memory usage and
* improves load and unload times at the cost of causing us to use slightly
* larger segments than we would otherwise in some cases.
*/
uint32_t metaslab_by_size_min_shift = 14;
/*
* If not set, we will first try normal allocation. If that fails then
* we will do a gang allocation. If that fails then we will do a "try hard"
* gang allocation. If that fails then we will have a multi-layer gang
* block.
*
* If set, we will first try normal allocation. If that fails then
* we will do a "try hard" allocation. If that fails we will do a gang
* allocation. If that fails we will do a "try hard" gang allocation. If
* that fails then we will have a multi-layer gang block.
*/
int zfs_metaslab_try_hard_before_gang = B_FALSE;
/*
* When not trying hard, we only consider the best zfs_metaslab_find_max_tries
* metaslabs. This improves performance, especially when there are many
* metaslabs per vdev and the allocation can't actually be satisfied (so we
* would otherwise iterate all the metaslabs). If there is a metaslab with a
* worse weight but it can actually satisfy the allocation, we won't find it
* until trying hard. This may happen if the worse metaslab is not loaded
* (and the true weight is better than we have calculated), or due to weight
* bucketization. E.g. we are looking for a 60K segment, and the best
* metaslabs all have free segments in the 32-63K bucket, but the best
* zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
* subsequent metaslab has ms_max_size >60KB (but fewer segments in this
* bucket, and therefore a lower weight).
*/
int zfs_metaslab_find_max_tries = 100;
static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;
typedef struct metaslab_stats {
kstat_named_t metaslabstat_trace_over_limit;
kstat_named_t metaslabstat_reload_tree;
kstat_named_t metaslabstat_too_many_tries;
kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;
static metaslab_stats_t metaslab_stats = {
{ "trace_over_limit", KSTAT_DATA_UINT64 },
{ "reload_tree", KSTAT_DATA_UINT64 },
{ "too_many_tries", KSTAT_DATA_UINT64 },
{ "try_hard", KSTAT_DATA_UINT64 },
};
#define METASLABSTAT_BUMP(stat) \
atomic_inc_64(&metaslab_stats.stat.value.ui64);
kstat_t *metaslab_ksp;
void
metaslab_stat_init(void)
{
ASSERT(metaslab_alloc_trace_cache == NULL);
metaslab_alloc_trace_cache = kmem_cache_create(
"metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
"misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (metaslab_ksp != NULL) {
metaslab_ksp->ks_data = &metaslab_stats;
kstat_install(metaslab_ksp);
}
}
void
metaslab_stat_fini(void)
{
if (metaslab_ksp != NULL) {
kstat_delete(metaslab_ksp);
metaslab_ksp = NULL;
}
kmem_cache_destroy(metaslab_alloc_trace_cache);
metaslab_alloc_trace_cache = NULL;
}
/*
* ==========================================================================
* Metaslab classes
* ==========================================================================
*/
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
metaslab_class_t *mc;
mc = kmem_zalloc(offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
mc->mc_spa = spa;
mc->mc_ops = ops;
mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
mca->mca_rotor = NULL;
zfs_refcount_create_tracked(&mca->mca_alloc_slots);
}
return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
ASSERT(mc->mc_alloc == 0);
ASSERT(mc->mc_deferred == 0);
ASSERT(mc->mc_space == 0);
ASSERT(mc->mc_dspace == 0);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
ASSERT(mca->mca_rotor == NULL);
zfs_refcount_destroy(&mca->mca_alloc_slots);
}
mutex_destroy(&mc->mc_lock);
multilist_destroy(&mc->mc_metaslab_txg_list);
kmem_free(mc, offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
metaslab_group_t *mg;
vdev_t *vd;
/*
* Must hold one of the spa_config locks.
*/
ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
return (0);
do {
vd = mg->mg_vd;
ASSERT(vd->vdev_mg != NULL);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(mg->mg_class, ==, mc);
ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
return (0);
}
static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
atomic_add_64(&mc->mc_alloc, alloc_delta);
atomic_add_64(&mc->mc_deferred, defer_delta);
atomic_add_64(&mc->mc_space, space_delta);
atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
return (mc->mc_alloc);
}
uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
return (mc->mc_deferred);
}
uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
return (mc->mc_space);
}
uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t *mc_hist;
int i;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
mutex_enter(&mc->mc_lock);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = vdev_get_mg(tvd, mc);
/*
* Skip any holes, uninitialized top-levels, or
* vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
mc_hist[i] += mg->mg_histogram[i];
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
}
mutex_exit(&mc->mc_lock);
kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
/*
* Calculate the metaslab class's fragmentation metric. The metric
* is weighted based on the space contribution of each metaslab group.
* The return value will be a number between 0 and 100 (inclusive), or
* ZFS_FRAG_INVALID if the metric has not been set. See comment above the
* zfs_frag_table for more information about the metric.
*/
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t fragmentation = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
/*
* Skip any holes, uninitialized top-levels,
* or vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* If a metaslab group does not contain a fragmentation
* metric then just bail out.
*/
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (ZFS_FRAG_INVALID);
}
/*
* Determine how much this metaslab_group is contributing
* to the overall pool fragmentation metric.
*/
fragmentation += mg->mg_fragmentation *
metaslab_group_get_space(mg);
}
fragmentation /= metaslab_class_get_space(mc);
ASSERT3U(fragmentation, <=, 100);
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (fragmentation);
}
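/*
 * Illustrative numbers (hypothetical): a class made up of a 1 TB
 * group at 10% fragmentation and a 3 TB group at 50% fragmentation
 * yields (10 * 1T + 50 * 3T) / 4T = 40% for the class as a whole.
 */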
/*
* Calculate the amount of expandable space that is available in
* this metaslab class. If a device is expanded then its expandable
* space will be the amount of allocatable space that is currently not
* part of this metaslab class.
*/
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t space = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* Calculate if we have enough space to add additional
* metaslabs. We report the expandable space in terms
* of the metaslab size since that's the unit of expansion.
*/
space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (space);
}
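/*
 * For example (hypothetical sizes): with 512MB metaslabs
 * (vdev_ms_shift == 29) and vdev_max_asize - vdev_asize == 1.3GB,
 * P2ALIGN() rounds the expandable space down to 1GB, i.e. the two
 * whole metaslabs that could actually be added.
 */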
void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
multilist_t *ml = &mc->mc_metaslab_txg_list;
for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL) {
mutex_enter(&msp->ms_lock);
/*
* If the metaslab has been removed from the list
* (which could happen if we were at the memory limit
* and it was evicted during this loop), then we can't
* proceed and we should restart the sublist.
*/
if (!multilist_link_active(&msp->ms_class_txg_node)) {
mutex_exit(&msp->ms_lock);
i--;
break;
}
mls = multilist_sublist_lock(ml, i);
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
if (txg >
msp->ms_selected_txg + metaslab_unload_delay &&
gethrtime() > msp->ms_selected_time +
(uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
metaslab_evict(msp, txg);
} else {
/*
* Once we've hit a metaslab selected too
* recently to evict, we're done evicting for
* now.
*/
mutex_exit(&msp->ms_lock);
break;
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
}
}
}
static int
metaslab_compare(const void *x1, const void *x2)
{
const metaslab_t *m1 = (const metaslab_t *)x1;
const metaslab_t *m2 = (const metaslab_t *)x2;
int sort1 = 0;
int sort2 = 0;
if (m1->ms_allocator != -1 && m1->ms_primary)
sort1 = 1;
else if (m1->ms_allocator != -1 && !m1->ms_primary)
sort1 = 2;
if (m2->ms_allocator != -1 && m2->ms_primary)
sort2 = 1;
else if (m2->ms_allocator != -1 && !m2->ms_primary)
sort2 = 2;
/*
* Sort inactive metaslabs first, then primaries, then secondaries. When
* selecting a metaslab to allocate from, an allocator first tries its
* primary, then secondary active metaslab. If it doesn't have active
* metaslabs, or can't allocate from them, it searches for an inactive
* metaslab to activate. If it can't find a suitable one, it will steal
* a primary or secondary metaslab from another allocator.
*/
if (sort1 < sort2)
return (-1);
if (sort1 > sort2)
return (1);
int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
if (likely(cmp))
return (cmp);
IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
return (TREE_CMP(m1->ms_start, m2->ms_start));
}
/*
* ==========================================================================
* Metaslab groups
* ==========================================================================
*/
/*
* Update the allocatable flag and the metaslab group's capacity.
* The allocatable flag is set to true if the capacity is below
* the zfs_mg_noalloc_threshold or has a fragmentation value that is
* greater than zfs_mg_fragmentation_threshold. If a metaslab group
* transitions from allocatable to non-allocatable or vice versa then the
* metaslab group's class is updated to reflect the transition.
*/
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
metaslab_class_t *mc = mg->mg_class;
vdev_stat_t *vs = &vd->vdev_stat;
boolean_t was_allocatable;
boolean_t was_initialized;
ASSERT(vd == vd->vdev_top);
ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
SCL_ALLOC);
mutex_enter(&mg->mg_lock);
was_allocatable = mg->mg_allocatable;
was_initialized = mg->mg_initialized;
mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
(vs->vs_space + 1);
mutex_enter(&mc->mc_lock);
/*
* If the metaslab group was just added then it won't
* have any space until we finish syncing out this txg.
* At that point we will consider it initialized and available
* for allocations. We also don't consider non-activated
* metaslab groups (e.g. vdevs that are in the middle of being removed)
* to be initialized, because they can't be used for allocation.
*/
mg->mg_initialized = metaslab_group_initialized(mg);
if (!was_initialized && mg->mg_initialized) {
mc->mc_groups++;
} else if (was_initialized && !mg->mg_initialized) {
ASSERT3U(mc->mc_groups, >, 0);
mc->mc_groups--;
}
if (mg->mg_initialized)
mg->mg_no_free_space = B_FALSE;
/*
* A metaslab group is considered allocatable if it has plenty
* of free space or is not heavily fragmented. We only take
* fragmentation into account if the metaslab group has a valid
* fragmentation metric (i.e. a value between 0 and 100).
*/
mg->mg_allocatable = (mg->mg_activation_count > 0 &&
mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
(mg->mg_fragmentation == ZFS_FRAG_INVALID ||
mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
/*
* The mc_alloc_groups maintains a count of the number of
* groups in this metaslab class that are still above the
* zfs_mg_noalloc_threshold. This is used by the allocating
* threads to determine if they should avoid allocations to
* a given group. The allocator will avoid allocations to a group
* if that group has reached or is below the zfs_mg_noalloc_threshold
* and there are still other groups that are above the threshold.
* When a group transitions from allocatable to non-allocatable or
* vice versa we update the metaslab class to reflect that change.
* When the mc_alloc_groups value drops to 0 that means that all
* groups have reached the zfs_mg_noalloc_threshold making all groups
* eligible for allocations. This effectively means that all devices
* are balanced again.
*/
if (was_allocatable && !mg->mg_allocatable)
mc->mc_alloc_groups--;
else if (!was_allocatable && mg->mg_allocatable)
mc->mc_alloc_groups++;
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
const metaslab_t *a = va;
const metaslab_t *b = vb;
int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
if (likely(cmp))
return (cmp);
uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
cmp = TREE_CMP(a_vdev_id, b_vdev_id);
if (cmp)
return (cmp);
return (TREE_CMP(a->ms_id, b->ms_id));
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
metaslab_group_t *mg;
mg = kmem_zalloc(offsetof(metaslab_group_t,
mg_allocator[allocators]), KM_SLEEP);
mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
avl_create(&mg->mg_metaslab_tree, metaslab_compare,
sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
mg->mg_vd = vd;
mg->mg_class = mc;
mg->mg_activation_count = 0;
mg->mg_initialized = B_FALSE;
mg->mg_no_free_space = B_TRUE;
mg->mg_allocators = allocators;
for (int i = 0; i < allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
}
mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
/*
* We may have gone below zero with the activation count
* either because we never activated in the first place or
* because we're done, and possibly removing the vdev.
*/
ASSERT(mg->mg_activation_count <= 0);
taskq_destroy(mg->mg_taskq);
avl_destroy(&mg->mg_metaslab_tree);
mutex_destroy(&mg->mg_lock);
mutex_destroy(&mg->mg_ms_disabled_lock);
cv_destroy(&mg->mg_ms_disabled_cv);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
}
kmem_free(mg, offsetof(metaslab_group_t,
mg_allocator[mg->mg_allocators]));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count <= 0);
if (++mg->mg_activation_count <= 0)
return;
mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
metaslab_group_alloc_update(mg);
if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
mg->mg_prev = mg;
mg->mg_next = mg;
} else {
mgnext = mgprev->mg_next;
mg->mg_prev = mgprev;
mg->mg_next = mgnext;
mgprev->mg_next = mg;
mgnext->mg_prev = mg;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
mc->mc_allocator[i].mca_rotor = mg;
mg = mg->mg_next;
}
}
/*
* Passivate a metaslab group and remove it from the allocation rotor.
* Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
* a metaslab group. This function will momentarily drop spa_config_locks
* that are lower than the SCL_ALLOC lock (see comment below).
*/
void
metaslab_group_passivate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
(SCL_ALLOC | SCL_ZIO));
if (--mg->mg_activation_count != 0) {
for (int i = 0; i < spa->spa_alloc_count; i++)
ASSERT(mc->mc_allocator[i].mca_rotor != mg);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count < 0);
return;
}
/*
* The spa_config_lock is an array of rwlocks, ordered as
* follows (from highest to lowest):
* SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
* SCL_ZIO > SCL_FREE > SCL_VDEV
* (For more information about the spa_config_lock see spa_misc.c)
* The higher the lock, the broader its coverage. When we passivate
* a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
* config locks. However, the metaslab group's taskq might be trying
* to preload metaslabs so we must drop the SCL_ZIO lock and any
* lower locks to allow the I/O to complete. At a minimum,
* we continue to hold the SCL_ALLOC lock, which prevents any future
* allocations from taking place and any changes to the vdev tree.
*/
spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
taskq_wait_outstanding(mg->mg_taskq, 0);
spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
metaslab_group_alloc_update(mg);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
metaslab_t *msp = mga->mga_primary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
msp = mga->mga_secondary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
}
mgprev = mg->mg_prev;
mgnext = mg->mg_next;
if (mg == mgnext) {
mgnext = NULL;
} else {
mgprev->mg_next = mgnext;
mgnext->mg_prev = mgprev;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
if (mc->mc_allocator[i].mca_rotor == mg)
mc->mc_allocator[i].mca_rotor = mgnext;
}
mg->mg_prev = NULL;
mg->mg_next = NULL;
}
boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
vdev_stat_t *vs = &vd->vdev_stat;
return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}
uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
/*
* Note that the number of nodes in mg_metaslab_tree may be one less
* than vdev_ms_count, due to the embedded log metaslab.
*/
mutex_enter(&mg->mg_lock);
uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
mutex_exit(&mg->mg_lock);
return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}
void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
uint64_t *mg_hist;
avl_tree_t *t = &mg->mg_metaslab_tree;
uint64_t ashift = mg->mg_vd->vdev_ashift;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
SPACE_MAP_HISTOGRAM_SIZE + ashift);
mutex_enter(&mg->mg_lock);
for (metaslab_t *msp = avl_first(t);
msp != NULL; msp = AVL_NEXT(t, msp)) {
VERIFY3P(msp->ms_group, ==, mg);
/* skip if not active */
if (msp->ms_sm == NULL)
continue;
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
mg_hist[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
}
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
mutex_exit(&mg->mg_lock);
kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
ASSERT3U(mg->mg_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
ASSERT3U(mc->mc_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
ASSERT(msp->ms_group == NULL);
mutex_enter(&mg->mg_lock);
msp->ms_group = mg;
msp->ms_weight = 0;
avl_add(&mg->mg_metaslab_tree, msp);
mutex_exit(&mg->mg_lock);
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_add(mg, msp);
mutex_exit(&msp->ms_lock);
}
static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_remove(mg, msp);
mutex_exit(&msp->ms_lock);
mutex_enter(&mg->mg_lock);
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
msp->ms_group = NULL;
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(MUTEX_HELD(&mg->mg_lock));
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
msp->ms_weight = weight;
avl_add(&mg->mg_metaslab_tree, msp);
}
static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
/*
* Although in principle the weight can be any value, in
* practice we do not use values in the range [1, 511].
*/
ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
ASSERT(MUTEX_HELD(&msp->ms_lock));
mutex_enter(&mg->mg_lock);
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
/*
* Calculate the fragmentation for a given metaslab group. We can use
* a simple average here since all metaslabs within the group must have
* the same size. The return value will be a value between 0 and 100
* (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
* group have a fragmentation metric.
*/
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
uint64_t fragmentation = 0;
uint64_t valid_ms = 0;
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
continue;
if (msp->ms_group != mg)
continue;
valid_ms++;
fragmentation += msp->ms_fragmentation;
}
if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
return (ZFS_FRAG_INVALID);
fragmentation /= valid_ms;
ASSERT3U(fragmentation, <=, 100);
return (fragmentation);
}
/*
* Determine if a given metaslab group should skip allocations. A metaslab
* group should avoid allocations if its free capacity is less than the
* zfs_mg_noalloc_threshold or its fragmentation metric is greater than
* zfs_mg_fragmentation_threshold and there is at least one metaslab group
* that can still handle allocations. If the allocation throttle is enabled
* then we skip allocations to devices that have reached their maximum
* allocation queue depth unless the selected metaslab group is the only
* eligible group remaining.
*/
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
uint64_t psize, int allocator, int d)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_class_t *mc = mg->mg_class;
/*
* We can only consider skipping this metaslab group if it's
* in the normal metaslab class and there are other metaslab
* groups to select from. Otherwise, we always consider it eligible
* for allocations.
*/
if ((mc != spa_normal_class(spa) &&
mc != spa_special_class(spa) &&
mc != spa_dedup_class(spa)) ||
mc->mc_groups <= 1)
return (B_TRUE);
/*
* If the metaslab group's mg_allocatable flag is set (see comments
* in metaslab_group_alloc_update() for more information) and
* the allocation throttle is disabled then allow allocations to this
* device. However, if the allocation throttle is enabled then
* check if we have reached our allocation limit (mga_alloc_queue_depth)
* to determine if we should allow allocations to this metaslab group.
* If all metaslab groups are no longer considered allocatable
* (mc_alloc_groups == 0) or we're trying to allocate the smallest
* gang block size then we allow allocations on this metaslab group
* regardless of the mg_allocatable or throttle settings.
*/
if (mg->mg_allocatable) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
int64_t qdepth;
uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
if (!mc->mc_alloc_throttle_enabled)
return (B_TRUE);
/*
* If this metaslab group does not have any free space, then
* there is no point in looking further.
*/
if (mg->mg_no_free_space)
return (B_FALSE);
/*
* Relax allocation throttling for ditto blocks. Due to
* random imbalances in allocation, copies tend to be pushed to the
* one vdev that looks a bit better at the moment.
*/
qmax = qmax * (4 + d) / 4;
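/*
 * With the relaxation above the effective limit is qmax for the first
 * copy (d == 0), 1.25 * qmax for the second and 1.5 * qmax for the
 * third copy of a ditto block.
 */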
qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
/*
* If this metaslab group is below its qmax or it's
* the only allocatable metaslab group, then attempt
* to allocate from it.
*/
if (qdepth < qmax || mc->mc_alloc_groups == 1)
return (B_TRUE);
ASSERT3U(mc->mc_alloc_groups, >, 1);
/*
* Since this metaslab group is at or over its qmax, we
* need to determine if there are metaslab groups after this
* one that might be able to handle this allocation. This is
* racy since we can't hold the locks for all metaslab
* groups at the same time when we make this check.
*/
for (metaslab_group_t *mgp = mg->mg_next;
mgp != rotor; mgp = mgp->mg_next) {
metaslab_group_allocator_t *mgap =
&mgp->mg_allocator[allocator];
qmax = mgap->mga_cur_max_alloc_queue_depth;
qmax = qmax * (4 + d) / 4;
qdepth =
zfs_refcount_count(&mgap->mga_alloc_queue_depth);
/*
* If there is another metaslab group that
* might be able to handle the allocation, then
* we return false so that we skip this group.
*/
if (qdepth < qmax && !mgp->mg_no_free_space)
return (B_FALSE);
}
/*
* We didn't find another group to handle the allocation
* so we can't skip this metaslab group even though
* we are at or over our qmax.
*/
return (B_TRUE);
} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
return (B_TRUE);
}
return (B_FALSE);
}
/*
* ==========================================================================
* Range tree callbacks
* ==========================================================================
*/
/*
* Comparison function for the private size-ordered tree using 32-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
static int
metaslab_rangesize32_compare(const void *x1, const void *x2)
{
const range_seg32_t *r1 = x1;
const range_seg32_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
if (likely(cmp))
return (cmp);
return (TREE_CMP(r1->rs_start, r2->rs_start));
}
/*
* Comparison function for the private size-ordered tree using 64-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
static int
metaslab_rangesize64_compare(const void *x1, const void *x2)
{
const range_seg64_t *r1 = x1;
const range_seg64_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
if (likely(cmp))
return (cmp);
return (TREE_CMP(r1->rs_start, r2->rs_start));
}
typedef struct metaslab_rt_arg {
zfs_btree_t *mra_bt;
uint32_t mra_floor_shift;
} metaslab_rt_arg_t;
struct mssa_arg {
range_tree_t *rt;
metaslab_rt_arg_t *mra;
};
static void
metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
{
struct mssa_arg *mssap = arg;
range_tree_t *rt = mssap->rt;
metaslab_rt_arg_t *mrap = mssap->mra;
range_seg_max_t seg = {0};
rs_set_start(&seg, rt, start);
rs_set_end(&seg, rt, start + size);
metaslab_rt_add(rt, &seg, mrap);
}
static void
metaslab_size_tree_full_load(range_tree_t *rt)
{
metaslab_rt_arg_t *mrap = rt->rt_arg;
METASLABSTAT_BUMP(metaslabstat_reload_tree);
ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
mrap->mra_floor_shift = 0;
struct mssa_arg arg = {0};
arg.rt = rt;
arg.mra = mrap;
range_tree_walk(rt, metaslab_size_sorted_add, &arg);
}
/*
* Create any block-allocator-specific components. The current allocators
* rely on using both a size-ordered range_tree_t and an array of uint64_t's.
*/
/* ARGSUSED */
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
size_t size;
int (*compare) (const void *, const void *);
switch (rt->rt_type) {
case RANGE_SEG32:
size = sizeof (range_seg32_t);
compare = metaslab_rangesize32_compare;
break;
case RANGE_SEG64:
size = sizeof (range_seg64_t);
compare = metaslab_rangesize64_compare;
break;
default:
panic("Invalid range seg type %d", rt->rt_type);
}
zfs_btree_create(size_tree, compare, size);
mrap->mra_floor_shift = metaslab_by_size_min_shift;
}
/* ARGSUSED */
static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_destroy(size_tree);
kmem_free(mrap, sizeof (*mrap));
}
/* ARGSUSED */
static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
(1 << mrap->mra_floor_shift))
return;
zfs_btree_add(size_tree, rs);
}
/* ARGSUSED */
static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1 <<
mrap->mra_floor_shift))
return;
zfs_btree_remove(size_tree, rs);
}
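/*
* Editorial note: segments smaller than 1 << mra_floor_shift are kept
* out of the size-sorted B-tree and live only in the offset-ordered
* range tree. For example, if the floor shift were 12 (a hypothetical
* value), a 2K free segment would still be findable by offset but
* would never appear in the by-size tree. metaslab_size_tree_full_load()
* temporarily sets the floor to 0 so that every segment is re-added.
*/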
/* ARGSUSED */
static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
metaslab_rt_create(rt, arg);
}
static range_tree_ops_t metaslab_rt_ops = {
.rtop_create = metaslab_rt_create,
.rtop_destroy = metaslab_rt_destroy,
.rtop_add = metaslab_rt_add,
.rtop_remove = metaslab_rt_remove,
.rtop_vacate = metaslab_rt_vacate
};
/*
* ==========================================================================
* Common allocator routines
* ==========================================================================
*/
/*
* Return the maximum contiguous segment within the metaslab.
*/
uint64_t
metaslab_largest_allocatable(metaslab_t *msp)
{
zfs_btree_t *t = &msp->ms_allocatable_by_size;
range_seg_t *rs;
if (t == NULL)
return (0);
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL)
return (0);
return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
msp->ms_allocatable));
}
/*
* Return the maximum contiguous segment within the unflushed frees of this
* metaslab.
*/
static uint64_t
metaslab_largest_unflushed_free(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_unflushed_frees == NULL)
return (0);
if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_unflushed_frees);
range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
NULL);
if (rs == NULL)
return (0);
/*
* When a range is freed from the metaslab, that range is added to
* both the unflushed frees and the deferred frees. While the block
* will eventually be usable, if the metaslab were loaded the range
* would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
* txgs had passed. As a result, when attempting to estimate an upper
* bound for the largest currently-usable free segment in the
* metaslab, we need to not consider any ranges currently in the defer
* trees. This algorithm approximates the largest available chunk in
* the largest range in the unflushed_frees tree by taking the first
* chunk. While this may be a poor estimate, it should only remain so
* briefly and should eventually self-correct as frees are no longer
* deferred. Similar logic applies to the ms_freed tree. See
* metaslab_load() for more details.
*
* There are two primary sources of inaccuracy in this estimate. Both
* are tolerated for performance reasons. The first source is that we
* only check the largest segment for overlaps. Smaller segments may
* have more favorable overlaps with the other trees, resulting in
* larger usable chunks. Second, we only look at the first chunk in
* the largest segment; there may be other usable chunks in the
* largest segment, but we ignore them.
*/
uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
uint64_t start = 0;
uint64_t size = 0;
boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
rsize, &start, &size);
if (found) {
if (rstart == start)
return (0);
rsize = start - rstart;
}
}
uint64_t start = 0;
uint64_t size = 0;
boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
rsize, &start, &size);
if (found)
rsize = start - rstart;
return (rsize);
}
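/*
* Worked example (illustrative, not from the original comment): suppose
* the largest unflushed-free segment is [1M, 5M). If the first
* overlapping deferred free within that range starts at 3M, the
* estimate is trimmed to 3M - 1M = 2M; if the overlap covered the very
* start of the segment, 0 would be returned. The same trimming is then
* applied against ms_freed.
*/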
static range_seg_t *
metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
uint64_t size, zfs_btree_index_t *where)
{
range_seg_t *rs;
range_seg_max_t rsearch;
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, start + size);
rs = zfs_btree_find(t, &rsearch, where);
if (rs == NULL) {
rs = zfs_btree_next(t, where, where);
}
return (rs);
}
#if defined(WITH_DF_BLOCK_ALLOCATOR) || \
defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* This is a helper function that can be used by the allocator to find a
* suitable block to allocate. This will search the specified B-tree looking
* for a block that matches the specified criteria.
*/
static uint64_t
metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
uint64_t max_search)
{
if (*cursor == 0)
*cursor = rt->rt_start;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t where;
range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
uint64_t first_found;
int count_searched = 0;
if (rs != NULL)
first_found = rs_get_start(rs, rt);
while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
max_search || count_searched < metaslab_min_search_count)) {
uint64_t offset = rs_get_start(rs, rt);
if (offset + size <= rs_get_end(rs, rt)) {
*cursor = offset + size;
return (offset);
}
rs = zfs_btree_next(bt, &where, &where);
count_searched++;
}
*cursor = 0;
return (-1ULL);
}
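/*
* Usage sketch (editorial note): with *cursor at 1M and size 128K, the
* search begins at the first segment at or after [1M, 1M + 128K); the
* first segment large enough to hold 128K is returned and the cursor
* advances to offset + 128K. If nothing suitable is found within
* max_search bytes of the first candidate (after examining at least
* metaslab_min_search_count segments), the cursor is reset to 0 and
* -1ULL is returned so callers such as metaslab_df_alloc() can fall
* back to a size-based search.
*/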
#endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Dynamic Fit (df) block allocator
*
* Search for a free chunk of at least this size, starting from the last
* offset (for this alignment of block) looking for up to
* metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
* found within 16MB, then return a free chunk of exactly the requested size (or
* larger).
*
* If it seems like searching from the last offset will be unproductive, skip
* that and just return a free chunk of exactly the requested size (or larger).
* This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
* mechanism is probably not very useful and may be removed in the future.
*
* The behavior when not searching can be changed to return the largest free
* chunk, instead of a free chunk of exactly the requested size, by setting
* metaslab_df_use_largest_segment.
* ==========================================================================
*/
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
/*
* Find the largest power of 2 block size that evenly divides the
* requested size. This is used to try to allocate blocks with similar
* alignment from the same area of the metaslab (i.e. same cursor
* bucket), but it does not guarantee that allocations of other
* sizes will not be made in the same region.
*/
uint64_t align = size & -size;
uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
range_tree_t *rt = msp->ms_allocatable;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
uint64_t offset;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're running low on space, find a segment based on size,
* rather than iterating based on offset.
*/
if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
free_pct < metaslab_df_free_pct) {
offset = -1;
} else {
offset = metaslab_block_picker(rt,
cursor, size, metaslab_df_max_search);
}
if (offset == -1) {
range_seg_t *rs;
if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
if (metaslab_df_use_largest_segment) {
/* use largest free segment */
rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
} else {
zfs_btree_index_t where;
/* use segment of this size, or next largest */
rs = metaslab_block_find(&msp->ms_allocatable_by_size,
rt, msp->ms_start, size, &where);
}
if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
rt)) {
offset = rs_get_start(rs, rt);
*cursor = offset + size;
}
}
return (offset);
}
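/*
* Alignment-bucket example (illustrative): for a 24K request,
* align = 24K & -24K = 8K, so the allocation reuses the cursor shared
* by all requests whose largest power-of-2 divisor is 8K. A 16K
* request (align = 16K) uses a different cursor, which keeps streams
* of similarly aligned allocations clustered within the metaslab.
*/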
static metaslab_ops_t metaslab_df_ops = {
metaslab_df_alloc
};
metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */
#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Cursor fit block allocator -
* Select the largest region in the metaslab, set the cursor to the beginning
* of the range and the cursor_end to the end of the range. As allocations
* are made, advance the cursor. Continue allocating from the cursor until
* the range is exhausted and then find a new range.
* ==========================================================================
*/
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
uint64_t *cursor = &msp->ms_lbas[0];
uint64_t *cursor_end = &msp->ms_lbas[1];
uint64_t offset = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(*cursor_end, >=, *cursor);
if ((*cursor + size) > *cursor_end) {
range_seg_t *rs;
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
size)
return (-1ULL);
*cursor = rs_get_start(rs, rt);
*cursor_end = rs_get_end(rs, rt);
}
offset = *cursor;
*cursor += size;
return (offset);
}
static metaslab_ops_t metaslab_cf_ops = {
metaslab_cf_alloc
};
metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
#endif /* WITH_CF_BLOCK_ALLOCATOR */
#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* New dynamic fit allocator -
* Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
* contiguous blocks. If no region is found then just use the largest segment
* that remains.
* ==========================================================================
*/
/*
* Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
* to request from the allocator.
*/
uint64_t metaslab_ndf_clump_shift = 4;
static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
zfs_btree_t *t = &msp->ms_allocatable->rt_root;
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_index_t where;
range_seg_t *rs;
range_seg_max_t rsearch;
uint64_t hbit = highbit64(size);
uint64_t *cursor = &msp->ms_lbas[hbit - 1];
uint64_t max_size = metaslab_largest_allocatable(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (max_size < size)
return (-1ULL);
rs_set_start(&rsearch, rt, *cursor);
rs_set_end(&rsearch, rt, *cursor + size);
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
t = &msp->ms_allocatable_by_size;
rs_set_start(&rsearch, rt, 0);
rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
metaslab_ndf_clump_shift)));
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL)
rs = zfs_btree_next(t, &where, &where);
ASSERT(rs != NULL);
}
if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
*cursor = rs_get_start(rs, rt) + size;
return (rs_get_start(rs, rt));
}
return (-1ULL);
}
static metaslab_ops_t metaslab_ndf_ops = {
metaslab_ndf_alloc
};
metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
#endif /* WITH_NDF_BLOCK_ALLOCATOR */
/*
* ==========================================================================
* Metaslabs
* ==========================================================================
*/
/*
* Wait for any in-progress metaslab loads to complete.
*/
static void
metaslab_load_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_loading) {
ASSERT(!msp->ms_loaded);
cv_wait(&msp->ms_load_cv, &msp->ms_lock);
}
}
/*
* Wait for any in-progress flushing to complete.
*/
static void
metaslab_flush_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_flushing)
cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
}
static unsigned int
metaslab_idx_func(multilist_t *ml, void *arg)
{
metaslab_t *msp = arg;
/*
* ms_id values are allocated sequentially, so a full 64-bit
* division would be a waste of time; limit it to 32 bits.
*/
return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
}
uint64_t
metaslab_allocated_space(metaslab_t *msp)
{
return (msp->ms_allocated_space);
}
/*
* Verify that the space accounting on disk matches the in-core range_trees.
*/
static void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t allocating = 0;
uint64_t sm_free_space, msp_free_space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(!msp->ms_condensing);
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can only verify the metaslab space when we're called
* from syncing context with a loaded metaslab that has an
* allocated space map. Calling this in non-syncing context
* does not provide a consistent view of the metaslab since
* we're performing allocations in the future.
*/
if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
!msp->ms_loaded)
return;
/*
* Even though the smp_alloc field can go negative in general,
* that should never be the case for a metaslab's space map.
*/
ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
ASSERT3U(space_map_allocated(msp->ms_sm), >=,
range_tree_space(msp->ms_unflushed_frees));
ASSERT3U(metaslab_allocated_space(msp), ==,
space_map_allocated(msp->ms_sm) +
range_tree_space(msp->ms_unflushed_allocs) -
range_tree_space(msp->ms_unflushed_frees));
sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
/*
* Account for future allocations since we would have
* already deducted that space from the ms_allocatable.
*/
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
allocating +=
range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
}
ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
msp->ms_allocating_total);
ASSERT3U(msp->ms_deferspace, ==,
range_tree_space(msp->ms_defer[0]) +
range_tree_space(msp->ms_defer[1]));
msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
msp->ms_deferspace + range_tree_space(msp->ms_freed);
VERIFY3U(sm_free_space, ==, msp_free_space);
}
static void
metaslab_aux_histograms_clear(metaslab_t *msp)
{
/*
* Auxiliary histograms are only cleared when resetting them,
* which can only happen while the metaslab is loaded.
*/
ASSERT(msp->ms_loaded);
bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
for (int t = 0; t < TXG_DEFER_SIZE; t++)
bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
}
static void
metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
range_tree_t *rt)
{
/*
* This is modeled after space_map_histogram_add(), so refer to that
* function for implementation details. We want this to work like
* the space map histogram, and not the range tree histogram, as we
* are essentially constructing a delta that will be later subtracted
* from the space map histogram.
*/
int idx = 0;
for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT3U(i, >=, idx + shift);
histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
ASSERT3U(idx + shift, ==, i);
idx++;
ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
}
}
}
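/*
* Editorial note on the folding above: range tree buckets that fit
* within the space map histogram are copied over directly (the shift
* amount is 0), while buckets past its end are accumulated into the
* last bucket, scaled by 1 << (i - idx - shift) so that the amount of
* space represented stays roughly the same even though per-bucket
* resolution is lost.
*/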
/*
* Called at every sync pass in which the metaslab gets synced.
*
* The reason is that we want our auxiliary histograms to be updated
* whenever the metaslab's space map histogram is updated. This way
* we stay consistent about which parts of the metaslab space map's
* histogram are currently not available for allocations (e.g. because
* they are in the defer, freed, and freeing trees).
*/
static void
metaslab_aux_histograms_update(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(sm != NULL);
/*
* This is similar to the metaslab's space map histogram updates
* that take place in metaslab_sync(). The only difference is that
* we only care about segments that haven't made it into the
* ms_allocatable tree yet.
*/
if (msp->ms_loaded) {
metaslab_aux_histograms_clear(msp);
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freed);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
metaslab_aux_histogram_add(msp->ms_deferhist[t],
sm->sm_shift, msp->ms_defer[t]);
}
}
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freeing);
}
/*
* Called every time we are done syncing (writing to) the metaslab,
* i.e. at the end of each sync pass.
* [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
*/
static void
metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
if (sm == NULL) {
/*
* We came here from metaslab_init() when creating/opening a
* pool, looking at a metaslab that hasn't had any allocations
* yet.
*/
return;
}
/*
* This is similar to the actions that we take for the ms_freed
* and ms_defer trees in metaslab_sync_done().
*/
uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
if (defer_allowed) {
bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
sizeof (msp->ms_synchist));
} else {
bzero(msp->ms_deferhist[hist_index],
sizeof (msp->ms_deferhist[hist_index]));
}
bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
}
/*
* Ensure that the metaslab's weight and fragmentation are consistent
* with the contents of the histogram (either the range tree's histogram
* or the space map's, depending on whether the metaslab is loaded).
*/
static void
metaslab_verify_weight_and_frag(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can end up here from vdev_remove_complete(), in which case we
* cannot do these assertions because we hold spa config locks and
* thus we are not allowed to read from the DMU.
*
* We check if the metaslab group has been removed and if that's
* the case we return immediately as that would mean that we are
* here from the aforementioned code path.
*/
if (msp->ms_group == NULL)
return;
/*
* Devices being removed always return a weight of 0 and leave
* fragmentation and ms_max_size as is - there is nothing for
* us to verify here.
*/
vdev_t *vd = msp->ms_group->mg_vd;
if (vd->vdev_removing)
return;
/*
* If the metaslab is dirty it probably means that we've done
* some allocations or frees that have changed our histograms
* and thus the weight.
*/
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&vd->vdev_ms_list, msp, t))
return;
}
/*
* This verification checks that our in-memory state is consistent
* with what's on disk. If the pool is read-only then there aren't
* any changes and we just have the initially-loaded state.
*/
if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
return;
/* Some extra verification of the in-core tree, when possible. */
if (msp->ms_loaded) {
range_tree_stat_verify(msp->ms_allocatable);
VERIFY(space_map_histogram_verify(msp->ms_sm,
msp->ms_allocatable));
}
uint64_t weight = msp->ms_weight;
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
uint64_t frag = msp->ms_fragmentation;
uint64_t max_segsize = msp->ms_max_size;
msp->ms_weight = 0;
msp->ms_fragmentation = 0;
/*
* This function is used for verification purposes and thus should
* not introduce any side-effects/mutations on the system's state.
*
* Regardless of whether metaslab_weight() thinks this metaslab
* should be active or not, we want to ensure that the actual weight
* (and therefore the value of ms_weight) would be the same if it
* was to be recalculated at this point.
*
* In addition we set the nodirty flag so metaslab_weight() does
* not dirty the metaslab for future TXGs (e.g. when trying to
* force condensing to upgrade the metaslab spacemaps).
*/
msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
VERIFY3U(max_segsize, ==, msp->ms_max_size);
/*
* If the weight type changed then there is no point in doing
* verification. Revert fields to their original values.
*/
if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
(!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
msp->ms_fragmentation = frag;
msp->ms_weight = weight;
return;
}
VERIFY3U(msp->ms_fragmentation, ==, frag);
VERIFY3U(msp->ms_weight, ==, weight);
}
/*
* If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
* this class that was used longest ago, and attempt to unload it. To avoid
* degrading performance we don't want to spend too much time in this loop,
* and we expect that most of the time this operation will
* succeed. Between that and the normal unloading processing during txg sync,
* we expect this to keep the metaslab memory usage under control.
*/
static void
metaslab_potentially_evict(metaslab_class_t *mc)
{
#ifdef _KERNEL
uint64_t allmem = arc_all_memory();
uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
int tries = 0;
for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
tries++) {
unsigned int idx = multilist_get_random_index(
&mc->mc_metaslab_txg_list);
multilist_sublist_t *mls =
multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
inuse * size) {
VERIFY3P(mls, ==, multilist_sublist_lock(
&mc->mc_metaslab_txg_list, idx));
ASSERT3U(idx, ==,
metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
if (!multilist_link_active(&msp->ms_class_txg_node)) {
multilist_sublist_unlock(mls);
break;
}
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
/*
* If the metaslab is currently loading there are two
* cases. If it's the metaslab we're evicting, we
* can't continue on or we'll panic when we attempt to
* recursively lock the mutex. If it's another
* metaslab that's loading, it can be safely skipped,
* since we know it's very new and therefore not a
* good eviction candidate. We check later once the
* lock is held that the metaslab is fully loaded
* before actually unloading it.
*/
if (msp->ms_loading) {
msp = next_msp;
inuse =
spl_kmem_cache_inuse(zfs_btree_leaf_cache);
continue;
}
/*
* We can't unload metaslabs with no spacemap because
* they're not ready to be unloaded yet. We can't
* unload metaslabs with outstanding allocations
* because doing so could cause the metaslab's weight
* to decrease while it's unloaded, which violates an
* invariant that we use to prevent unnecessary
* loading. We also don't unload metaslabs that are
* currently active because they are high-weight
* metaslabs that are likely to be used in the near
* future.
*/
mutex_enter(&msp->ms_lock);
if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
msp->ms_allocating_total == 0) {
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
}
}
#endif
}
static int
metaslab_load_impl(metaslab_t *msp)
{
int error = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We temporarily drop the lock to unblock other operations while we
* are reading the space map. Therefore, metaslab_sync() and
* metaslab_sync_done() can run at the same time as we do.
*
* If we are using the log space maps, metaslab_sync() can't write to
* the metaslab's space map while we are loading as we only write to
* it when we are flushing the metaslab, and that can't happen while
* we are loading it.
*
* If we are not using log space maps though, metaslab_sync() can
* append to the space map while we are loading. Therefore we load
* only entries that existed when we started the load. Additionally,
* metaslab_sync_done() has to wait for the load to complete because
* there are potential races like metaslab_load() loading parts of the
* space map that are currently being appended by metaslab_sync(). If
* we didn't, the ms_allocatable would have entries that
* metaslab_sync_done() would try to re-add later.
*
* That's why before dropping the lock we remember the synced length
* of the metaslab and read up to that point of the space map,
* ignoring entries appended by metaslab_sync() that happen after we
* drop the lock.
*/
uint64_t length = msp->ms_synced_length;
mutex_exit(&msp->ms_lock);
hrtime_t load_start = gethrtime();
metaslab_rt_arg_t *mrap;
if (msp->ms_allocatable->rt_arg == NULL) {
mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
} else {
mrap = msp->ms_allocatable->rt_arg;
msp->ms_allocatable->rt_ops = NULL;
msp->ms_allocatable->rt_arg = NULL;
}
mrap->mra_bt = &msp->ms_allocatable_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
if (msp->ms_sm != NULL) {
error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
SM_FREE, length);
/* Now, populate the size-sorted tree. */
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
struct mssa_arg arg = {0};
arg.rt = msp->ms_allocatable;
arg.mra = mrap;
range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
&arg);
} else {
/*
* Add the size-sorted tree first, since we don't need to load
* the metaslab from the spacemap.
*/
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
/*
* The space map has not been allocated yet, so treat
* all the space in the metaslab as free and add it to the
* ms_allocatable tree.
*/
range_tree_add(msp->ms_allocatable,
msp->ms_start, msp->ms_size);
if (msp->ms_new) {
/*
* If the ms_sm doesn't exist, this means that this
* metaslab hasn't gone through metaslab_sync() and
* thus has never been dirtied. So we shouldn't
* expect any unflushed allocs or frees from previous
* TXGs.
*/
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
}
}
/*
* We need to grab the ms_sync_lock to prevent metaslab_sync() from
* changing the ms_sm (or log_sm) and the metaslab's range trees
* while we are about to use them and populate the ms_allocatable.
* The ms_lock is insufficient for this because metaslab_sync() doesn't
* hold the ms_lock while writing the ms_checkpointing tree to disk.
*/
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
ASSERT(!msp->ms_condensing);
ASSERT(!msp->ms_flushing);
if (error != 0) {
mutex_exit(&msp->ms_sync_lock);
return (error);
}
ASSERT3P(msp->ms_group, !=, NULL);
msp->ms_loaded = B_TRUE;
/*
* Apply all the unflushed changes to ms_allocatable right
* away so any manipulations we do below have a clear view
* of what is allocated and what is free.
*/
range_tree_walk(msp->ms_unflushed_allocs,
range_tree_remove, msp->ms_allocatable);
range_tree_walk(msp->ms_unflushed_frees,
range_tree_add, msp->ms_allocatable);
ASSERT3P(msp->ms_group, !=, NULL);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (spa_syncing_log_sm(spa) != NULL) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LOG_SPACEMAP));
/*
* If we use a log space map we add all the segments
* that are in ms_unflushed_frees so they are available
* for allocation.
*
* ms_allocatable needs to contain all free segments
* that are ready for allocations (thus not segments
* from ms_freeing, ms_freed, and the ms_defer trees).
* But if we grab the lock in this code path at a sync
* pass later than 1, then it also contains the
* segments of ms_freed (they were added to it earlier
* in this path through ms_unflushed_frees). So we
* need to remove all the segments that exist in
* ms_freed from ms_allocatable as they will be added
* later in metaslab_sync_done().
*
* When there's no log space map, the ms_allocatable
* correctly doesn't contain any segments that exist
* in ms_freed [see ms_synced_length].
*/
range_tree_walk(msp->ms_freed,
range_tree_remove, msp->ms_allocatable);
}
/*
* If we are not using the log space map, ms_allocatable
* contains the segments that exist in the ms_defer trees
* [see ms_synced_length]. Thus we need to remove them
* from ms_allocatable as they will be added again in
* metaslab_sync_done().
*
* If we are using the log space map, ms_allocatable still
* contains the segments that exist in the ms_defer trees,
* not because it read them through the ms_sm, but because
* these segments are part of ms_unflushed_frees, whose
* segments we add to ms_allocatable earlier in this
* code path.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_remove, msp->ms_allocatable);
}
/*
* Call metaslab_recalculate_weight_and_sort() now that the
* metaslab is loaded so we get the metaslab's real weight.
*
* Unless this metaslab was created with older software and
* has not yet been converted to use segment-based weight, we
* expect the new weight to be better or equal to the weight
* that the metaslab had while it was not loaded. This is
* because the old weight does not take into account the
* consolidation of adjacent segments between TXGs. [see
* comment for ms_synchist and ms_deferhist[] for more info]
*/
uint64_t weight = msp->ms_weight;
uint64_t max_size = msp->ms_max_size;
metaslab_recalculate_weight_and_sort(msp);
if (!WEIGHT_IS_SPACEBASED(weight))
ASSERT3U(weight, <=, msp->ms_weight);
msp->ms_max_size = metaslab_largest_allocatable(msp);
ASSERT3U(max_size, <=, msp->ms_max_size);
hrtime_t load_end = gethrtime();
msp->ms_load_time = load_end;
zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, smp_length %llu, "
"unflushed_allocs %llu, unflushed_frees %llu, "
"freed %llu, defer %llu + %llu, unloaded time %llu ms, "
"loading_time %lld ms, ms_max_size %llu, "
"max size error %lld, "
"old_weight %llx, new_weight %llx",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)space_map_length(msp->ms_sm),
(u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
(u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
(u_longlong_t)range_tree_space(msp->ms_freed),
(u_longlong_t)range_tree_space(msp->ms_defer[0]),
(u_longlong_t)range_tree_space(msp->ms_defer[1]),
(longlong_t)((load_start - msp->ms_unload_time) / 1000000),
(longlong_t)((load_end - load_start) / 1000000),
(u_longlong_t)msp->ms_max_size,
(u_longlong_t)msp->ms_max_size - max_size,
(u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
metaslab_verify_space(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_sync_lock);
return (0);
}
int
metaslab_load(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* There may be another thread loading the same metaslab; if that's
* the case, just wait until the other thread is done and return.
*/
metaslab_load_wait(msp);
if (msp->ms_loaded)
return (0);
VERIFY(!msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We set the loading flag BEFORE potentially dropping the lock to
* wait for an ongoing flush (see ms_flushing below). This way other
* threads know that there is already a thread that is loading this
* metaslab.
*/
msp->ms_loading = B_TRUE;
/*
* Wait for any in-progress flushing to finish as we drop the ms_lock
* both here (during space_map_load()) and in metaslab_flush() (when
* we flush our changes to the ms_sm).
*/
if (msp->ms_flushing)
metaslab_flush_wait(msp);
/*
* In the event that we were waiting for the metaslab to be
* flushed (where we temporarily dropped the ms_lock), ensure that
* no one else loaded the metaslab somehow.
*/
ASSERT(!msp->ms_loaded);
/*
* If we're loading a metaslab in the normal class, consider evicting
* another one to keep our memory usage under the limit defined by the
* zfs_metaslab_mem_limit tunable.
*/
if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
msp->ms_group->mg_class) {
metaslab_potentially_evict(msp->ms_group->mg_class);
}
int error = metaslab_load_impl(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
msp->ms_loading = B_FALSE;
cv_broadcast(&msp->ms_load_cv);
return (error);
}
void
metaslab_unload(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* This can happen if a metaslab is selected for eviction (in
* metaslab_potentially_evict) and then unloaded during spa_sync (via
* metaslab_class_evict_old).
*/
if (!msp->ms_loaded)
return;
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
msp->ms_loaded = B_FALSE;
msp->ms_unload_time = gethrtime();
msp->ms_activation_weight = 0;
msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
if (msp->ms_group != NULL) {
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, weight %llx, "
"selected txg %llu (%llu ms ago), alloc_txg %llu, "
"loaded %llu ms ago, max_size %llu",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)msp->ms_weight,
(u_longlong_t)msp->ms_selected_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_selected_time) / 1000 / 1000,
(u_longlong_t)msp->ms_alloc_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_load_time) / 1000 / 1000,
(u_longlong_t)msp->ms_max_size);
}
/*
* We explicitly recalculate the metaslab's weight based on its space
* map (as it is now not loaded). We want unloaded metaslabs to always
* have their weights calculated from the space map histograms, while
* loaded ones have it calculated from their in-core range tree
* [see metaslab_load()]. This way, the weight reflects the information
* available in-core, whether it is loaded or not.
*
* If ms_group == NULL, it means that we came here from metaslab_fini(),
* at which point it doesn't make sense for us to do the recalculation
* and the sorting.
*/
if (msp->ms_group != NULL)
metaslab_recalculate_weight_and_sort(msp);
}
/*
* We want to optimize the memory use of the per-metaslab range
* trees. To do this, we store the segments in the range trees in
* units of sectors, zero-indexing from the start of the metaslab. If
* vdev_ms_shift minus vdev_ashift is less than 32, we can store
* the ranges using two uint32_ts, rather than two uint64_ts.
*/
range_seg_type_t
metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
uint64_t *start, uint64_t *shift)
{
if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
!zfs_metaslab_force_large_segs) {
*shift = vdev->vdev_ashift;
*start = msp->ms_start;
return (RANGE_SEG32);
} else {
*shift = 0;
*start = 0;
return (RANGE_SEG64);
}
}
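/*
* Example (hedged): with an ashift of 12 and a metaslab shift of 34,
* the difference is 22 < 32, so RANGE_SEG32 is chosen and offsets are
* stored as 32-bit sector counts relative to ms_start. A configuration
* where the difference reached 32, or one with
* zfs_metaslab_force_large_segs set, would instead use RANGE_SEG64
* with absolute 64-bit offsets (start 0, shift 0).
*/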
void
metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
msp->ms_selected_txg = txg;
msp->ms_selected_time = gethrtime();
multilist_sublist_insert_tail(mls, msp);
multilist_sublist_unlock(mls);
}
void
metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta)
{
vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
ASSERT(vd->vdev_ms_count != 0);
metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
vdev_deflated_space(vd, space_delta));
}
int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
uint64_t txg, metaslab_t **msp)
{
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
metaslab_t *ms;
int error;
ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
multilist_link_init(&ms->ms_class_txg_node);
ms->ms_id = id;
ms->ms_start = id << vd->vdev_ms_shift;
ms->ms_size = 1ULL << vd->vdev_ms_shift;
ms->ms_allocator = -1;
ms->ms_new = B_TRUE;
vdev_ops_t *ops = vd->vdev_ops;
if (ops->vdev_op_metaslab_init != NULL)
ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
/*
* We only open space map objects that already exist. All others
* will be opened when we finally allocate an object for them.
*
* Note:
* When called from vdev_expand(), we can't call into the DMU as
* we are holding the spa_config_lock as a writer and we would
* deadlock [see relevant comment in vdev_metaslab_init()]. In
* that case, though, the object parameter is zero, so we won't
* call into the DMU.
*/
if (object != 0) {
error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
ms->ms_size, vd->vdev_ashift);
if (error != 0) {
kmem_free(ms, sizeof (metaslab_t));
return (error);
}
ASSERT(ms->ms_sm != NULL);
ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
}
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_SIZE; t++) {
ms->ms_allocating[t] = range_tree_create(NULL, type,
NULL, start, shift);
}
ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
start, shift);
}
ms->ms_checkpointing =
range_tree_create(NULL, type, NULL, start, shift);
ms->ms_unflushed_allocs =
range_tree_create(NULL, type, NULL, start, shift);
metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
type, mrap, start, shift);
ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
metaslab_group_add(mg, ms);
metaslab_set_fragmentation(ms, B_FALSE);
/*
* If we're opening an existing pool (txg == 0) or creating
* a new one (txg == TXG_INITIAL), all space is available now.
* If we're adding space to an existing pool, the new space
* does not become available until after this txg has synced.
* The metaslab's weight will also be initialized when we sync
* out this txg. This ensures that we don't attempt to allocate
* from it before we have initialized it completely.
*/
if (txg <= TXG_INITIAL) {
metaslab_sync_done(ms, 0);
metaslab_space_update(vd, mg->mg_class,
metaslab_allocated_space(ms), 0, 0);
}
if (txg != 0) {
vdev_dirty(vd, 0, NULL, txg);
vdev_dirty(vd, VDD_METASLAB, ms, txg);
}
*msp = ms;
return (0);
}
static void
metaslab_fini_flush_data(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (metaslab_unflushed_txg(msp) == 0) {
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
==, NULL);
return;
}
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp));
}
uint64_t
metaslab_unflushed_changes_memused(metaslab_t *ms)
{
return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
range_tree_numsegs(ms->ms_unflushed_frees)) *
ms->ms_unflushed_allocs->rt_root.bt_elem_size);
}
void
metaslab_fini(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
metaslab_fini_flush_data(msp);
metaslab_group_remove(mg, msp);
mutex_enter(&msp->ms_lock);
VERIFY(msp->ms_group == NULL);
/*
* If this metaslab hasn't been through metaslab_sync_done() yet, its
* space hasn't been accounted for in its vdev and doesn't need to be
* subtracted.
*/
if (!msp->ms_new) {
metaslab_space_update(vd, mg->mg_class,
-metaslab_allocated_space(msp), 0, -msp->ms_size);
}
space_map_close(msp->ms_sm);
msp->ms_sm = NULL;
metaslab_unload(msp);
range_tree_destroy(msp->ms_allocatable);
range_tree_destroy(msp->ms_freeing);
range_tree_destroy(msp->ms_freed);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_destroy(msp->ms_unflushed_allocs);
range_tree_destroy(msp->ms_checkpointing);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
range_tree_destroy(msp->ms_unflushed_frees);
for (int t = 0; t < TXG_SIZE; t++) {
range_tree_destroy(msp->ms_allocating[t]);
}
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_destroy(msp->ms_defer[t]);
}
ASSERT0(msp->ms_deferspace);
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
range_tree_vacate(msp->ms_trim, NULL, NULL);
range_tree_destroy(msp->ms_trim);
mutex_exit(&msp->ms_lock);
cv_destroy(&msp->ms_load_cv);
cv_destroy(&msp->ms_flush_cv);
mutex_destroy(&msp->ms_lock);
mutex_destroy(&msp->ms_sync_lock);
ASSERT3U(msp->ms_allocator, ==, -1);
kmem_free(msp, sizeof (metaslab_t));
}
#define FRAGMENTATION_TABLE_SIZE 17
/*
* This table defines a segment size based fragmentation metric that will
* allow each metaslab to derive its own fragmentation value. This is done
* by calculating the space in each bucket of the spacemap histogram and
* multiplying that by the fragmentation metric in this table. Doing
* this for all buckets and dividing it by the total amount of free
* space in this metaslab (i.e. the total free space in all buckets) gives
* us the fragmentation metric. This means that a high fragmentation metric
* equates to most of the free space being comprised of small segments.
* Conversely, if the metric is low, then most of the free space is in
* large segments. A 10% change in fragmentation equates to approximately
* double the number of segments.
*
* This table defines 0% fragmented space using 16MB segments. Testing has
* shown that segments that are greater than or equal to 16MB do not suffer
* from drastic performance problems. Using this value, we derive the rest
* of the table. Since the fragmentation value is never stored on disk, it
* is possible to change these calculations in the future.
*/
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
100, /* 512B */
100, /* 1K */
98, /* 2K */
95, /* 4K */
90, /* 8K */
80, /* 16K */
70, /* 32K */
60, /* 64K */
50, /* 128K */
40, /* 256K */
30, /* 512K */
20, /* 1M */
15, /* 2M */
10, /* 4M */
5, /* 8M */
0 /* 16M */
};
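/*
* Worked example (added for illustration): a metaslab with 1GB of free
* space, half of it in 512K segments (factor 30) and half in 8M
* segments (factor 5), gets (512M * 30 + 512M * 5) / 1024M = 17 with
* integer math, i.e. roughly 17% fragmented. If all of the free space
* sat in segments of 16M or larger, the result would be 0.
*/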
/*
* Calculate the metaslab's fragmentation metric and set ms_fragmentation.
* Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
* been upgraded and does not support this metric. Otherwise, the computed
* value should be in the range [0, 100].
*/
static void
metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t fragmentation = 0;
uint64_t total = 0;
boolean_t feature_enabled = spa_feature_is_enabled(spa,
SPA_FEATURE_SPACEMAP_HISTOGRAM);
if (!feature_enabled) {
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
/*
* A null space map means that the entire metaslab is free
* and thus is not fragmented.
*/
if (msp->ms_sm == NULL) {
msp->ms_fragmentation = 0;
return;
}
/*
* If this metaslab's space map has not been upgraded, flag it
* so that we upgrade next time we encounter it.
*/
if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
uint64_t txg = spa_syncing_txg(spa);
vdev_t *vd = msp->ms_group->mg_vd;
/*
* If we've reached the final dirty txg, then we must
* be shutting down the pool. We don't want to dirty
* any data past this point so skip setting the condense
* flag. We can retry this action the next time the pool
* is imported. We also skip marking this metaslab for
* condensing if the caller has explicitly set nodirty.
*/
if (!nodirty &&
spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
msp->ms_condense_wanted = B_TRUE;
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
zfs_dbgmsg("txg %llu, requesting force condense: "
"ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
(u_longlong_t)msp->ms_id,
(u_longlong_t)vd->vdev_id);
}
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
uint64_t space = 0;
uint8_t shift = msp->ms_sm->sm_shift;
int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
FRAGMENTATION_TABLE_SIZE - 1);
if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
continue;
space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
total += space;
ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
fragmentation += space * zfs_frag_table[idx];
}
if (total > 0)
fragmentation /= total;
ASSERT3U(fragmentation, <=, 100);
msp->ms_fragmentation = fragmentation;
}
/*
* Compute a weight -- a selection preference value -- for the given metaslab.
* This is based on the amount of free space, the level of fragmentation,
* the LBA range, and whether the metaslab is loaded.
*/
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
uint64_t weight, space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The baseline weight is the metaslab's free space.
*/
space = msp->ms_size - metaslab_allocated_space(msp);
if (metaslab_fragmentation_factor_enabled &&
msp->ms_fragmentation != ZFS_FRAG_INVALID) {
/*
* Use the fragmentation information to inversely scale
* down the baseline weight. We need to ensure that we
* don't exclude this metaslab completely when it's 100%
* fragmented. To avoid this we reduce the fragmented value
* by 1.
*/
space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
/*
* If space < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. The fragmentation metric may have
* decreased the space to something smaller than
* SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
* so that we can consume any remaining space.
*/
if (space > 0 && space < SPA_MINBLOCKSIZE)
space = SPA_MINBLOCKSIZE;
}
weight = space;
/*
* Modern disks have uniform bit density and constant angular velocity.
* Therefore, the outer recording zones are faster (higher bandwidth)
* than the inner zones by the ratio of outer to inner track diameter,
* which is typically around 2:1. We account for this by assigning
* higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
* In effect, this means that we'll select the metaslab with the most
* free bandwidth rather than simply the one with the most free space.
*/
if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
ASSERT(weight >= space && weight <= 2 * space);
}
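/*
* Illustrative numbers (editorial): on a vdev with 200 metaslabs,
* ms_id 0 keeps 2x its free-space weight, ms_id 100 gets
* 2w - 100w/200 = 1.5x, and ms_id 199 ends up just above 1x,
* mirroring the roughly 2:1 outer-to-inner bandwidth ratio described
* above.
*/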
/*
* If this metaslab is one we're actively using, adjust its
* weight to make it preferable to any inactive metaslab so
* we'll polish it off. If the fragmentation on this metaslab
* has exceeded our threshold, then don't mark it active.
*/
if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
}
WEIGHT_SET_SPACEBASED(weight);
return (weight);
}
/*
* Return the weight of the specified metaslab, according to the segment-based
* weighting algorithm. The metaslab must be loaded. This function can
* be called within a sync pass since it relies only on the metaslab's
* range tree which is always accurate when the metaslab is loaded.
*/
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
uint64_t weight = 0;
uint32_t segments = 0;
ASSERT(msp->ms_loaded);
for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
i--) {
uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
segments <<= 1;
segments += msp->ms_allocatable->rt_histogram[i];
/*
* The range tree provides more precision than the space map
* and must be downgraded so that all values fit within the
* space map's histogram. This allows us to compare loaded
* vs. unloaded metaslabs to determine which metaslab is
* considered "best".
*/
if (i > max_idx)
continue;
if (segments != 0) {
WEIGHT_SET_COUNT(weight, segments);
WEIGHT_SET_INDEX(weight, i);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Calculate the weight based on the on-disk histogram. Should be applied
* only to unloaded metaslabs (i.e. no incoming allocations) in order to
* give results consistent with the on-disk state.
*/
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(!msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(space_map_object(sm), !=, 0);
ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* Create a joint histogram from all the segments that have made
* it to the metaslab's space map histogram, that are not yet
* available for allocation because they are still in the freeing
* pipeline (e.g. freeing, freed, and defer trees). Then subtract
* these segments from the space map's histogram to get a more
* accurate weight.
*/
uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
deferspace_histogram[i] += msp->ms_synchist[i];
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
deferspace_histogram[i] += msp->ms_deferhist[t][i];
}
}
uint64_t weight = 0;
for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
deferspace_histogram[i]);
uint64_t count =
sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
if (count != 0) {
WEIGHT_SET_COUNT(weight, count);
WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Compute a segment-based weight for the specified metaslab. The weight
* is determined by the highest bucket in the histogram. The information
* for the highest bucket is encoded into the weight value.
*/
static uint64_t
metaslab_segment_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
uint64_t weight = 0;
uint8_t shift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The metaslab is completely free.
*/
if (metaslab_allocated_space(msp) == 0) {
int idx = highbit64(msp->ms_size) - 1;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
if (idx < max_idx) {
WEIGHT_SET_COUNT(weight, 1ULL);
WEIGHT_SET_INDEX(weight, idx);
} else {
WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
WEIGHT_SET_INDEX(weight, max_idx);
}
WEIGHT_SET_ACTIVE(weight, 0);
ASSERT(!WEIGHT_IS_SPACEBASED(weight));
return (weight);
}
ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* If the metaslab is fully allocated then just make the weight 0.
*/
if (metaslab_allocated_space(msp) == msp->ms_size)
return (0);
/*
* If the metaslab is already loaded, then use the range tree to
* determine the weight. Otherwise, we rely on the space map information
* to generate the weight.
*/
if (msp->ms_loaded) {
weight = metaslab_weight_from_range_tree(msp);
} else {
weight = metaslab_weight_from_spacemap(msp);
}
/*
* If the metaslab was active the last time we calculated its weight
* then keep it active. We want to consume the entire region that
* is associated with this weight.
*/
if (msp->ms_activation_weight != 0 && weight != 0)
WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
return (weight);
}
/*
* Determine if we should attempt to allocate from this metaslab. If the
* metaslab is loaded, then we can determine if the desired allocation
* can be satisfied by looking at the size of the maximum free segment
* on that metaslab. Otherwise, we make our decision based on the metaslab's
* weight. For segment-based weighting we can determine the maximum
* allocation based on the index encoded in its value. For space-based
* weights we rely on the entire weight (excluding the weight-type bit).
*/
static boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
{
/*
* If the metaslab is loaded, ms_max_size is definitive and we can use
* the fast check. If it's not, the ms_max_size is a lower bound (once
* set), and we should use the fast check as long as we're not in
* try_hard and it's been less than zfs_metaslab_max_size_cache_sec
* seconds since the metaslab was unloaded.
*/
if (msp->ms_loaded ||
(msp->ms_max_size != 0 && !try_hard && gethrtime() <
msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
return (msp->ms_max_size >= asize);
boolean_t should_allocate;
if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
/*
* The metaslab segment weight indicates segments in the
* range [2^i, 2^(i+1)), where i is the index in the weight.
* Since the asize might be in the middle of the range, we
* should attempt the allocation if asize < 2^(i+1).
*/
should_allocate = (asize <
1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
} else {
should_allocate = (asize <=
(msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
}
return (should_allocate);
}
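/*
* Example (editorial note): for a segment-based weight whose index is
* 20, the metaslab is known to have free segments in [1M, 2M), so any
* asize below 2M (1ULL << 21) is worth attempting. For a space-based
* weight, the raw weight with the type bit masked off is compared
* directly against asize.
*/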
static uint64_t
metaslab_weight(metaslab_t *msp, boolean_t nodirty)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
uint64_t weight;
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_set_fragmentation(msp, nodirty);
/*
* Update the maximum size. If the metaslab is loaded, this will
* ensure that we get an accurate maximum size if newly freed space
* has been added back into the free tree. If the metaslab is
* unloaded, we check if there's a larger free segment in the
* unflushed frees. This is a lower bound on the largest allocatable
* segment size. Coalescing of adjacent entries may reveal larger
* allocatable segments, but we aren't aware of those until loading
* the space map into a range tree.
*/
if (msp->ms_loaded) {
msp->ms_max_size = metaslab_largest_allocatable(msp);
} else {
msp->ms_max_size = MAX(msp->ms_max_size,
metaslab_largest_unflushed_free(msp));
}
/*
* Segment-based weighting requires space map histogram support.
*/
if (zfs_metaslab_segment_weight_enabled &&
spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
(msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
sizeof (space_map_phys_t))) {
weight = metaslab_segment_weight(msp);
} else {
weight = metaslab_space_weight(msp);
}
return (weight);
}
void
metaslab_recalculate_weight_and_sort(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/* Note: we preserve the mask (e.g. indication of primary, etc.). */
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(msp->ms_group, msp,
metaslab_weight(msp, B_FALSE) | was_active);
}
static int
metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
int allocator, uint64_t activation_weight)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're activating for the claim code, we don't want to actually
* set the metaslab up for a specific allocator.
*/
if (activation_weight == METASLAB_WEIGHT_CLAIM) {
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort(mg, msp, msp->ms_weight |
activation_weight);
return (0);
}
metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
&mga->mga_primary : &mga->mga_secondary);
mutex_enter(&mg->mg_lock);
if (*mspp != NULL) {
mutex_exit(&mg->mg_lock);
return (EEXIST);
}
*mspp = msp;
ASSERT3S(msp->ms_allocator, ==, -1);
msp->ms_allocator = allocator;
msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort_impl(mg, msp,
msp->ms_weight | activation_weight);
mutex_exit(&mg->mg_lock);
return (0);
}
static int
metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The current metaslab is already activated for us so there
* is nothing to do. Being activated, though, doesn't mean
* that this metaslab is activated for our allocator or with
* our requested activation weight. The metaslab could have
* started as an active one for our allocator but changed
* allocators while we were waiting to grab its ms_lock, or we
* stole it [see find_valid_metaslab()]. This means that this
* thread may end up passivating a metaslab that belongs to
* another allocator or has a different activation mask.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
ASSERT(msp->ms_loaded);
return (0);
}
int error = metaslab_load(msp);
if (error != 0) {
metaslab_group_sort(msp->ms_group, msp, 0);
return (error);
}
/*
* When entering metaslab_load() we may have dropped the
* ms_lock because we were loading this metaslab, or we
* were waiting for another thread to load it for us. In
* that scenario, we recheck the weight of the metaslab
* to see if it was activated by another thread.
*
* If the metaslab was activated for another allocator or
* it was activated with a different activation weight (e.g.
* we wanted to make it a primary but it was activated as
* secondary) we return error (EBUSY).
*
* If the metaslab was activated for the same allocator
* and requested activation mask, skip activating it.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
if (msp->ms_allocator != allocator)
return (EBUSY);
if ((msp->ms_weight & activation_weight) == 0)
return (SET_ERROR(EBUSY));
EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
msp->ms_primary);
return (0);
}
/*
* If the metaslab has literally 0 space, it will have weight 0. In
* that case, don't bother activating it. This can happen if the
* metaslab had space during find_valid_metaslab, but another thread
* loaded it and used all that space while we were waiting to grab the
* lock.
*/
if (msp->ms_weight == 0) {
ASSERT0(range_tree_space(msp->ms_allocatable));
return (SET_ERROR(ENOSPC));
}
if ((error = metaslab_activate_allocator(msp->ms_group, msp,
allocator, activation_weight)) != 0) {
return (error);
}
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
return (0);
}
static void
metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
metaslab_group_sort(mg, msp, weight);
return;
}
mutex_enter(&mg->mg_lock);
ASSERT3P(msp->ms_group, ==, mg);
ASSERT3S(0, <=, msp->ms_allocator);
ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
if (msp->ms_primary) {
ASSERT3P(mga->mga_primary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
mga->mga_primary = NULL;
} else {
ASSERT3P(mga->mga_secondary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
mga->mga_secondary = NULL;
}
msp->ms_allocator = -1;
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_passivate(metaslab_t *msp, uint64_t weight)
{
uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
/*
* If size < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. In that case, it had better be empty,
* or we would be leaving space on the table.
*/
ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
size >= SPA_MINBLOCKSIZE ||
range_tree_space(msp->ms_allocatable) == 0);
ASSERT0(weight & METASLAB_ACTIVE_MASK);
ASSERT(msp->ms_activation_weight != 0);
msp->ms_activation_weight = 0;
metaslab_passivate_allocator(msp->ms_group, msp, weight);
ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
}
/*
* Segment-based metaslabs are activated once and remain active until
* we either fail an allocation attempt (similar to space-based metaslabs)
* or have exhausted the free space in zfs_metaslab_switch_threshold
* buckets since the metaslab was activated. This function checks to see
* if we've exhausted the zfs_metaslab_switch_threshold buckets in the
* metaslab and passivates it proactively. This will allow us to select a
* metaslab with a larger contiguous region, if any, remaining within this
* metaslab group. If we're in sync pass > 1, then we continue using this
* metaslab so that we don't dirty more blocks and cause more sync passes.
*/
static void
metaslab_segment_may_passivate(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
return;
/*
* Since we are in the middle of a sync pass, the most accurate
* information that is accessible to us is the in-core range tree
* histogram; calculate the new weight based on that information.
*/
uint64_t weight = metaslab_weight_from_range_tree(msp);
int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
int current_idx = WEIGHT_GET_INDEX(weight);
if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
metaslab_passivate(msp, weight);
}
static void
metaslab_preload(void *arg)
{
metaslab_t *msp = arg;
metaslab_class_t *mc = msp->ms_group->mg_class;
spa_t *spa = mc->mc_spa;
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
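/*
 * Load the metaslab under its own lock and record the currently
 * syncing txg as its selection time, so the freshly loaded metaslab
 * counts as recently selected.
 */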
mutex_enter(&msp->ms_lock);
(void) metaslab_load(msp);
metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_lock);
spl_fstrans_unmark(cookie);
}
static void
metaslab_group_preload(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_t *msp;
avl_tree_t *t = &mg->mg_metaslab_tree;
int m = 0;
if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
taskq_wait_outstanding(mg->mg_taskq, 0);
return;
}
mutex_enter(&mg->mg_lock);
/*
* Load the next potential metaslabs
*/
for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
ASSERT3P(msp->ms_group, ==, mg);
/*
* We preload only the maximum number of metaslabs specified
* by metaslab_preload_limit. If a metaslab is being forced
* to condense then we preload it too. This will ensure
* that force condensing happens in the next txg.
*/
if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
continue;
}
VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
msp, TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&mg->mg_lock);
}
/*
* Determine if the space map's on-disk footprint is past our tolerance for
* inefficiency. We would like to use the following criteria to make our
* decision:
*
* 1. Do not condense if the size of the space map object would dramatically
* increase as a result of writing out the free space range tree.
*
* 2. Condense if the on-disk space map representation is at least
* zfs_condense_pct/100 times the size of the optimal representation
* (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
*
* 3. Do not condense if the on-disk size of the space map does not actually
* decrease.
*
* Unfortunately, we cannot compute the on-disk size of the space map in this
* context because we cannot accurately compute the effects of compression, etc.
* Instead, we apply the heuristic described in the block comment for
* zfs_metaslab_condense_block_threshold - we only condense if the space used
* is greater than a threshold number of blocks.
*/
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
vdev_t *vd = msp->ms_group->mg_vd;
uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
/*
* We always condense metaslabs that are empty and metaslabs for
* which a condense request has been made.
*/
if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
msp->ms_condense_wanted)
return (B_TRUE);
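/*
 * Otherwise condense only if the current on-disk length is at least
 * zfs_condense_pct percent of the estimated optimal size and the
 * space map already spans more than the block-count threshold.
 */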
uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
uint64_t object_size = space_map_length(sm);
uint64_t optimal_size = space_map_estimate_optimal_size(sm,
msp->ms_allocatable, SM_NO_VDEVID);
return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
object_size > zfs_metaslab_condense_block_threshold * record_size);
}
/*
* Condense the on-disk space map representation to its minimized form.
* The minimized form consists of a small number of allocations followed
* by the entries of the free range tree (ms_allocatable). The condensed
* spacemap contains all the entries of previous TXGs (including those in
* the pool-wide log spacemaps; thus this is effectively a superset of
* metaslab_flush()), but this TXG's entries still need to be written.
*/
static void
metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
{
range_tree_t *condense_tree;
space_map_t *sm = msp->ms_sm;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_sm != NULL);
/*
* In order to condense the space map, we need to change it so it
* only describes which segments are currently allocated and free.
*
* All the current free space resides in the ms_allocatable, all
* the ms_defer trees, and all the ms_allocating trees. We ignore
* ms_freed because it is empty, as we're in sync pass 1. We
* ignore ms_freeing because these changes are not yet reflected
* in the spacemap (they will be written later this txg).
*
* So to truncate the space map to represent all the entries of
* previous TXGs we do the following:
*
* 1] We create a range tree (condense tree) that is 100% empty.
* 2] We add to it all segments found in the ms_defer trees
* as those segments are marked as free in the original space
* map. We do the same with the ms_allocating trees for the same
* reason. Adding these segments should be a relatively
* inexpensive operation since we expect these trees to have a
* small number of nodes.
* 3] We vacate any unflushed allocs, since they are not frees we
* need to add to the condense tree. Then we vacate any
* unflushed frees as they should already be part of ms_allocatable.
* 4] At this point, we would ideally like to add all segments
* in the ms_allocatable tree to the condense tree. This way
* we would write all the entries of the condense tree as the
* condensed space map, which would only contain freed
* segments with everything else assumed to be allocated.
*
* Doing so can be prohibitively expensive as ms_allocatable can
* be large, and therefore computationally expensive to add to
* the condense_tree. Instead we first sync out an entry marking
* everything as allocated, then the condense_tree and then the
* ms_allocatable, in the condensed space map. While this is not
* optimal, it is typically close to optimal and more importantly
* much cheaper to compute.
*
* 5] Finally, as both of the unflushed trees were written to our
* new and condensed metaslab space map, we basically flushed
* all the unflushed changes to disk, thus we call
* metaslab_flush_update().
*/
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
"spa %s, smp size %llu, segments %llu, forcing condense=%s",
(u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
(u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
msp->ms_condense_wanted ? "TRUE" : "FALSE");
msp->ms_condense_wanted = B_FALSE;
range_seg_type_t type;
uint64_t shift, start;
type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
&start, &shift);
condense_tree = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_add, condense_tree);
}
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
range_tree_add, condense_tree);
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
/*
* We're about to drop the metaslab's lock thus allowing other
* consumers to change its contents. Set the metaslab's ms_condensing
* flag to ensure that allocations on this metaslab do not occur
* while we're in the middle of committing it to disk. This is only
* critical for ms_allocatable as all other range trees use per TXG
* views of their content.
*/
msp->ms_condensing = B_TRUE;
mutex_exit(&msp->ms_lock);
uint64_t object = space_map_object(msp->ms_sm);
space_map_truncate(sm,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
/*
* space_map_truncate() may have reallocated the spacemap object.
* If so, update the vdev_ms_array.
*/
if (space_map_object(msp->ms_sm) != object) {
object = space_map_object(msp->ms_sm);
dmu_write(spa->spa_meta_objset,
msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &object, tx);
}
/*
* Note:
* When the log space map feature is enabled, each space map will
* always have ALLOCS followed by FREES for each sync pass. This is
* typically true even when the log space map feature is disabled,
* except in the case where a metaslab goes through metaslab_sync()
* and gets condensed. In that case the metaslab's space map will have
* ALLOCS followed by FREES (due to condensing) followed by ALLOCS
* followed by FREES (due to space_map_write() in metaslab_sync()) for
* sync pass 1.
*/
range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
shift);
range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
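/*
 * Per step 4 above: write a single ALLOC entry covering the entire
 * metaslab, then record ms_allocatable and the condense tree as FREE.
 */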
space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
range_tree_vacate(condense_tree, NULL, NULL);
range_tree_destroy(condense_tree);
range_tree_vacate(tmp_tree, NULL, NULL);
range_tree_destroy(tmp_tree);
mutex_enter(&msp->ms_lock);
msp->ms_condensing = B_FALSE;
metaslab_flush_update(msp, tx);
}
/*
* Called when the metaslab has been flushed (its own spacemap now reflects
* all the contents of the pool-wide spacemap log). Updates the metaslab's
* metadata and any pool-wide related log space map data (e.g. summary,
* obsolete logs, etc..) to reflect that.
*/
static void
metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
/*
* Just because a metaslab got flushed, that doesn't mean that
* it will pass through metaslab_sync_done(). Thus, make sure to
* update ms_synced_length here in case it doesn't.
*/
msp->ms_synced_length = space_map_length(msp->ms_sm);
/*
* We may end up here from metaslab_condense() without the
* feature being active. In that case this is a no-op.
*/
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
ASSERT(spa_syncing_log_sm(spa) != NULL);
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
/* update metaslab's position in our flushing tree */
uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
/* update metaslab counts of spa_log_sm_t nodes */
spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
spa_log_sm_increment_current_mscount(spa);
/* cleanup obsolete logs if any */
uint64_t log_blocks_before = spa_log_sm_nblocks(spa);
spa_cleanup_old_sm_logs(spa, tx);
uint64_t log_blocks_after = spa_log_sm_nblocks(spa);
VERIFY3U(log_blocks_after, <=, log_blocks_before);
/* update log space map summary */
uint64_t blocks_gone = log_blocks_before - log_blocks_after;
spa_log_summary_add_flushed_metaslab(spa);
spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg);
spa_log_summary_decrement_blkcount(spa, blocks_gone);
}
boolean_t
metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
/*
* There is nothing wrong with flushing the same metaslab twice, as
* this codepath should work in that case. However, the current
* flushing scheme makes sure to avoid this situation as we would be
* making all these calls without having anything meaningful to write
* to disk. We assert this behavior here.
*/
ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
/*
* We cannot flush while loading, because then we would
* not load the ms_unflushed_{allocs,frees}.
*/
if (msp->ms_loading)
return (B_FALSE);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
/*
* Metaslab condensing is effectively flushing. Therefore if the
* metaslab can be condensed we can just condense it instead of
* flushing it.
*
* Note that metaslab_condense() does call metaslab_flush_update()
* so we can just return immediately after condensing. We also
* don't need to care about setting ms_flushing or broadcasting
* ms_flush_cv, even if we temporarily drop the ms_lock in
* metaslab_condense(), as the metaslab is already loaded.
*/
if (msp->ms_loaded && metaslab_should_condense(msp)) {
metaslab_group_t *mg = msp->ms_group;
/*
* For all histogram operations below refer to the
* comments of metaslab_sync() where we follow a
* similar procedure.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
metaslab_condense(msp, tx);
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
ASSERT(range_tree_is_empty(msp->ms_freed));
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
/*
* Since we recreated the histogram (and potentially
* the ms_sm too while condensing) ensure that the
* weight is updated too because we are not guaranteed
* that this metaslab is dirty and will go through
* metaslab_sync_done().
*/
metaslab_recalculate_weight_and_sort(msp);
return (B_TRUE);
}
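/*
 * Regular flush path: append the unflushed allocs and frees to the
 * metaslab's own space map with the ms_lock dropped, then empty the
 * unflushed trees and update the log space map accounting.
 */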
msp->ms_flushing = B_TRUE;
uint64_t sm_len_before = space_map_length(msp->ms_sm);
mutex_exit(&msp->ms_lock);
space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
uint64_t sm_len_after = space_map_length(msp->ms_sm);
if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
"appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
(u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
(u_longlong_t)(sm_len_after - sm_len_before));
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
metaslab_flush_update(msp, tx);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
msp->ms_flushing = B_FALSE;
cv_broadcast(&msp->ms_flush_cv);
return (B_TRUE);
}
/*
* Write a metaslab to disk in the context of the specified transaction group.
*/
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
dmu_tx_t *tx;
ASSERT(!vd->vdev_ishole);
/*
* This metaslab has just been added so there's no work to do now.
*/
if (msp->ms_new) {
ASSERT0(range_tree_space(alloctree));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_freed));
ASSERT0(range_tree_space(msp->ms_checkpointing));
ASSERT0(range_tree_space(msp->ms_trim));
return;
}
/*
* Normally, we don't want to process a metaslab if there are no
* allocations or frees to perform. However, if the metaslab is being
* forced to condense, it's loaded and we're not beyond the final
* dirty txg, we need to let it through. Not condensing beyond the
* final dirty txg prevents an issue where metaslabs that need to be
* condensed but were loaded for other reasons could cause a panic
* here. By only checking the txg in that branch of the conditional,
* we preserve the utility of the VERIFY statements in all other
* cases.
*/
if (range_tree_is_empty(alloctree) &&
range_tree_is_empty(msp->ms_freeing) &&
range_tree_is_empty(msp->ms_checkpointing) &&
!(msp->ms_loaded && msp->ms_condense_wanted &&
txg <= spa_final_dirty_txg(spa)))
return;
VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
/*
* The only state that can actually be changing concurrently
* with metaslab_sync() is the metaslab's ms_allocatable. No
* other thread can be modifying this txg's alloc, freeing,
* freed, or space_map_phys_t. We drop ms_lock whenever we
* could call into the DMU, because the DMU can call down to
* us (e.g. via zio_free()) at any time.
*
* The spa_vdev_remove_thread() can be reading metaslab state
* concurrently, and it is locked out by the ms_sync_lock.
* Note that the ms_lock is insufficient for this, because it
* is dropped by space_map_write().
*/
tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
/*
* Generate a log space map if one doesn't exist already.
*/
spa_generate_syncing_log_sm(spa, tx);
if (msp->ms_sm == NULL) {
uint64_t new_object = space_map_alloc(mos,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log :
zfs_metaslab_sm_blksz_no_log, tx);
VERIFY3U(new_object, !=, 0);
dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &new_object, tx);
VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
msp->ms_start, msp->ms_size, vd->vdev_ashift));
ASSERT(msp->ms_sm != NULL);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
ASSERT0(metaslab_allocated_space(msp));
}
if (metaslab_unflushed_txg(msp) == 0 &&
spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
ASSERT(spa_syncing_log_sm(spa) != NULL);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
spa_log_sm_increment_current_mscount(spa);
spa_log_summary_add_flushed_metaslab(spa);
ASSERT(msp->ms_sm != NULL);
mutex_enter(&spa->spa_flushed_ms_lock);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
}
if (!range_tree_is_empty(msp->ms_checkpointing) &&
vd->vdev_checkpoint_sm == NULL) {
ASSERT(spa_has_checkpoint(spa));
uint64_t new_object = space_map_alloc(mos,
zfs_vdev_standard_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* We save the space map object as an entry in vdev_top_zap
* so it can be retrieved when the pool is reopened after an
* export or through zdb.
*/
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (new_object), 1, &new_object, tx));
}
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
/*
* Note: metaslab_condense() clears the space map's histogram.
* Therefore we must verify and remove this histogram before
* condensing.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
metaslab_should_condense(msp))
metaslab_condense(msp, tx);
/*
* We'll be going to disk to sync our space accounting, thus we
* drop the ms_lock during that time so allocations coming from
* open-context (ZIL) for future TXGs do not block.
*/
mutex_exit(&msp->ms_lock);
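/*
 * With the log space map feature active, this txg's allocs and frees
 * are appended to the pool-wide log space map and folded into the
 * metaslab's unflushed trees; otherwise they are written directly to
 * the metaslab's own space map.
 */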
space_map_t *log_sm = spa_syncing_log_sm(spa);
if (log_sm != NULL) {
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
space_map_write(log_sm, alloctree, SM_ALLOC,
vd->vdev_id, tx);
space_map_write(log_sm, msp->ms_freeing, SM_FREE,
vd->vdev_id, tx);
mutex_enter(&msp->ms_lock);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_remove_xor_add(alloctree,
msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
range_tree_remove_xor_add(msp->ms_freeing,
msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
spa->spa_unflushed_stats.sus_memused +=
metaslab_unflushed_changes_memused(msp);
} else {
ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
}
msp->ms_allocated_space += range_tree_space(alloctree);
ASSERT3U(msp->ms_allocated_space, >=,
range_tree_space(msp->ms_freeing));
msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
if (!range_tree_is_empty(msp->ms_checkpointing)) {
ASSERT(spa_has_checkpoint(spa));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since we are doing writes to disk and the ms_checkpointing
* tree won't be changing during that time, we drop the
* ms_lock while writing to the checkpoint space map, for the
* same reason mentioned above.
*/
mutex_exit(&msp->ms_lock);
space_map_write(vd->vdev_checkpoint_sm,
msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
spa->spa_checkpoint_info.sci_dspace +=
range_tree_space(msp->ms_checkpointing);
vd->vdev_stat.vs_checkpoint_space +=
range_tree_space(msp->ms_checkpointing);
ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
-space_map_allocated(vd->vdev_checkpoint_sm));
range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
}
if (msp->ms_loaded) {
/*
* When the space map is loaded, we have an accurate
* histogram in the range tree. This gives us an opportunity
* to bring the space map's histogram up-to-date so we clear
* it first before updating it.
*/
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
/*
* Since we've cleared the histogram we need to add back
* any free space that has already been processed, plus
* any deferred space. This allows the on-disk histogram
* to accurately reflect all free space even if some space
* is not yet available for allocation (i.e. deferred).
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
/*
* Add back any deferred free space that has not been
* added back into the in-core free tree yet. This will
* ensure that we don't end up with a space map histogram
* that is completely empty unless the metaslab is fully
* allocated.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
}
/*
* Always add the free space from this sync pass to the space
* map histogram. We want to make sure that the on-disk histogram
* accounts for all free space. If the space map is not loaded,
* then we will lose some accuracy but will correct it the next
* time we load the space map.
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
/*
* For sync pass 1, we avoid traversing this txg's free range tree
* and instead will just swap the pointers for freeing and freed.
* We can safely do this since the freed_tree is guaranteed to be
* empty on the initial pass.
*
* Keep in mind that even if we are currently using a log spacemap
* we want current frees to end up in the ms_allocatable (but not
* get appended to the ms_sm) so their ranges can be reused as usual.
*/
if (spa_sync_pass(spa) == 1) {
range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
ASSERT0(msp->ms_allocated_this_txg);
} else {
range_tree_vacate(msp->ms_freeing,
range_tree_add, msp->ms_freed);
}
msp->ms_allocated_this_txg += range_tree_space(alloctree);
range_tree_vacate(alloctree, NULL, NULL);
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
& TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_checkpointing));
mutex_exit(&msp->ms_lock);
/*
* Verify that the space map object ID has been recorded in the
* vdev_ms_array.
*/
uint64_t object;
VERIFY0(dmu_read(mos, vd->vdev_ms_array,
msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
VERIFY3U(object, ==, space_map_object(msp->ms_sm));
mutex_exit(&msp->ms_sync_lock);
dmu_tx_commit(tx);
}
static void
metaslab_evict(metaslab_t *msp, uint64_t txg)
{
if (!msp->ms_loaded || msp->ms_disabled != 0)
return;
for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
VERIFY0(range_tree_space(
msp->ms_allocating[(txg + t) & TXG_MASK]));
}
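/*
 * Passivate the metaslab if it is still bound to an allocator, then
 * unload it unless unloading has been disabled for debugging.
 */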
if (msp->ms_allocator != -1)
metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
if (!metaslab_debug_unload)
metaslab_unload(msp);
}
/*
* Called after a transaction group has completely synced to mark
* all of the metaslab's free space as usable.
*/
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
range_tree_t **defer_tree;
int64_t alloc_delta, defer_delta;
boolean_t defer_allowed = B_TRUE;
ASSERT(!vd->vdev_ishole);
mutex_enter(&msp->ms_lock);
if (msp->ms_new) {
/* this is a new metaslab, add its capacity to the vdev */
metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
/* there should be no allocations nor frees at this point */
VERIFY0(msp->ms_allocated_this_txg);
VERIFY0(range_tree_space(msp->ms_freed));
}
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_checkpointing));
defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
metaslab_class_get_alloc(spa_normal_class(spa));
if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
defer_allowed = B_FALSE;
}
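/*
 * When the pool is down to its slop space or the vdev is being
 * removed, skip the defer step so freed space becomes allocatable
 * immediately.
 */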
defer_delta = 0;
alloc_delta = msp->ms_allocated_this_txg -
range_tree_space(msp->ms_freed);
if (defer_allowed) {
defer_delta = range_tree_space(msp->ms_freed) -
range_tree_space(*defer_tree);
} else {
defer_delta -= range_tree_space(*defer_tree);
}
metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
defer_delta, 0);
if (spa_syncing_log_sm(spa) == NULL) {
/*
* If there's a metaslab_load() in progress and we don't have
* a log space map, it means that we probably wrote to the
* metaslab's space map. If this is the case, we need to
* make sure that we wait for the load to complete so that we
* have a consistent view of the in-core state of the metaslab.
*/
metaslab_load_wait(msp);
} else {
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
}
/*
* When auto-trimming is enabled, free ranges which are added to
* ms_allocatable are also added to ms_trim. The ms_trim tree is
* periodically consumed by the vdev_autotrim_thread() which issues
* trims for all ranges and then vacates the tree. The ms_trim tree
* can be discarded at any time with the sole consequence of recent
* frees not being trimmed.
*/
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
if (!defer_allowed) {
range_tree_walk(msp->ms_freed, range_tree_add,
msp->ms_trim);
}
} else {
range_tree_vacate(msp->ms_trim, NULL, NULL);
}
/*
* Move the frees from the defer_tree back to the free
* range tree (if it's loaded). Swap the freed_tree and
* the defer_tree -- this is safe to do because we've
* just emptied out the defer_tree.
*/
range_tree_vacate(*defer_tree,
msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
if (defer_allowed) {
range_tree_swap(&msp->ms_freed, defer_tree);
} else {
range_tree_vacate(msp->ms_freed,
msp->ms_loaded ? range_tree_add : NULL,
msp->ms_allocatable);
}
msp->ms_synced_length = space_map_length(msp->ms_sm);
msp->ms_deferspace += defer_delta;
ASSERT3S(msp->ms_deferspace, >=, 0);
ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
if (msp->ms_deferspace != 0) {
/*
* Keep syncing this metaslab until all deferred frees
* are back in circulation.
*/
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
}
metaslab_aux_histograms_update_done(msp, defer_allowed);
if (msp->ms_new) {
msp->ms_new = B_FALSE;
mutex_enter(&mg->mg_lock);
mg->mg_ms_ready++;
mutex_exit(&mg->mg_lock);
}
/*
* Re-sort metaslab within its group now that we've adjusted
* its allocatable space.
*/
metaslab_recalculate_weight_and_sort(msp);
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_freed));
ASSERT0(range_tree_space(msp->ms_checkpointing));
msp->ms_allocating_total -= msp->ms_allocated_this_txg;
msp->ms_allocated_this_txg = 0;
mutex_exit(&msp->ms_lock);
}
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_class->mc_spa;
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
metaslab_group_alloc_update(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
/*
* Preload the next potential metaslabs but only on active
* metaslab groups. We can get into a state where the metaslab
* is no longer active since we dirty metaslabs as we remove
* a device, thus potentially making the metaslab group eligible
* for preloading.
*/
if (mg->mg_activation_count > 0) {
metaslab_group_preload(mg);
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
}
/*
* When writing a ditto block (i.e. more than one DVA for a given BP) on
* the same vdev as an existing DVA of this BP, then try to allocate it
* on a different metaslab than existing DVAs (i.e. a unique metaslab).
*/
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
uint64_t dva_ms_id;
if (DVA_GET_ASIZE(dva) == 0)
return (B_TRUE);
if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
return (B_TRUE);
dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
return (msp->ms_id != dva_ms_id);
}
/*
* ==========================================================================
* Metaslab allocation tracing facility
* ==========================================================================
*/
/*
* Add an allocation trace element to the allocation tracing list.
*/
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
int allocator)
{
metaslab_alloc_trace_t *mat;
if (!metaslab_trace_enabled)
return;
/*
* When the tracing list reaches its maximum we remove
* the second element in the list before adding a new one.
* By removing the second element we preserve the original
* entry as a clue to what allocation steps have already been
* performed.
*/
if (zal->zal_size == metaslab_trace_max_entries) {
metaslab_alloc_trace_t *mat_next;
#ifdef ZFS_DEBUG
panic("too many entries in allocation list");
#endif
METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
zal->zal_size--;
mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
list_remove(&zal->zal_list, mat_next);
kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
}
mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
list_link_init(&mat->mat_list_node);
mat->mat_mg = mg;
mat->mat_msp = msp;
mat->mat_size = psize;
mat->mat_dva_id = dva_id;
mat->mat_offset = offset;
mat->mat_weight = 0;
mat->mat_allocator = allocator;
if (msp != NULL)
mat->mat_weight = msp->ms_weight;
/*
* The list is part of the zio so locking is not required. Only
* a single thread will perform allocations for a given zio.
*/
list_insert_tail(&zal->zal_list, mat);
zal->zal_size++;
ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}
void
metaslab_trace_init(zio_alloc_list_t *zal)
{
list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
offsetof(metaslab_alloc_trace_t, mat_list_node));
zal->zal_size = 0;
}
void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
metaslab_alloc_trace_t *mat;
while ((mat = list_remove_head(&zal->zal_list)) != NULL)
kmem_cache_free(metaslab_alloc_trace_cache, mat);
list_destroy(&zal->zal_list);
zal->zal_size = 0;
}
/*
* ==========================================================================
* Metaslab block operations
* ==========================================================================
*/
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
int allocator)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
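/*
 * Charge this pending async allocation against the allocator's queue
 * depth; metaslab_group_alloc_decrement() drops the reference when
 * the I/O finishes.
 */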
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
}
static void
metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
metaslab_class_allocator_t *mca =
&mg->mg_class->mc_allocator[allocator];
uint64_t max = mg->mg_max_alloc_queue_depth;
uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
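/*
 * Atomically raise this allocator's queue-depth ceiling toward the
 * group maximum; each successful bump also grants the class one more
 * allocation slot.
 */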
while (cur < max) {
if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
cur, cur + 1) == cur) {
atomic_inc_64(&mca->mca_alloc_max_slots);
return;
}
cur = mga->mga_cur_max_alloc_queue_depth;
}
}
void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
int allocator, boolean_t io_complete)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
if (io_complete)
metaslab_group_increment_qdepth(mg, allocator);
}
void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
int allocator)
{
#ifdef ZFS_DEBUG
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
for (int d = 0; d < ndvas; d++) {
uint64_t vdev = DVA_GET_VDEV(&dva[d]);
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
}
#endif
}
static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
uint64_t start;
range_tree_t *rt = msp->ms_allocatable;
metaslab_class_t *mc = msp->ms_group->mg_class;
ASSERT(MUTEX_HELD(&msp->ms_lock));
VERIFY(!msp->ms_condensing);
VERIFY0(msp->ms_disabled);
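/*
 * Let the metaslab class's allocator ops pick the segment; a return
 * value of -1ULL means no suitable segment was found.
 */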
start = mc->mc_ops->msop_alloc(msp, size);
if (start != -1ULL) {
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
range_tree_remove(rt, start, size);
range_tree_clear(msp->ms_trim, start, size);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
msp->ms_allocating_total += size;
/* Track the last successful allocation */
msp->ms_alloc_txg = txg;
metaslab_verify_space(msp, txg);
}
/*
* Now that we've attempted the allocation we need to update the
* metaslab's maximum block size since it may have changed.
*/
msp->ms_max_size = metaslab_largest_allocatable(msp);
return (start);
}
/*
* Find the metaslab with the highest weight that is less than what we've
* already tried. In the common case, this means that we will examine each
* metaslab at most once. Note that concurrent callers could reorder metaslabs
* by activation/passivation once we have dropped the mg_lock. If a metaslab is
* activated by another thread, and we fail to allocate from the metaslab we
* have selected, we may not try the newly-activated metaslab, and instead
* activate another metaslab. This is not optimal, but generally does not cause
* any problems (a possible exception being if every metaslab is completely full
* except for the newly-activated metaslab which we fail to examine).
*/
static metaslab_t *
find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
boolean_t *was_active)
{
avl_index_t idx;
avl_tree_t *t = &mg->mg_metaslab_tree;
metaslab_t *msp = avl_find(t, search, &idx);
if (msp == NULL)
msp = avl_nearest(t, idx, AVL_AFTER);
int tries = 0;
for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
int i;
if (!try_hard && tries > zfs_metaslab_find_max_tries) {
METASLABSTAT_BUMP(metaslabstat_too_many_tries);
return (NULL);
}
tries++;
if (!metaslab_should_allocate(msp, asize, try_hard)) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
continue;
}
/*
* If the selected metaslab is condensing or disabled,
* skip it.
*/
if (msp->ms_condensing || msp->ms_disabled > 0)
continue;
*was_active = msp->ms_allocator != -1;
/*
* If we're activating as primary, this is our first allocation
* from this disk, so we don't need to check how close we are.
* If the metaslab under consideration was already active,
* we're getting desperate enough to steal another allocator's
* metaslab, so we still don't care about distances.
*/
if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
break;
for (i = 0; i < d; i++) {
if (want_unique &&
!metaslab_is_unique(msp, &dva[i]))
break; /* try another metaslab */
}
if (i == d)
break;
}
if (msp != NULL) {
search->ms_weight = msp->ms_weight;
search->ms_start = msp->ms_start + 1;
search->ms_allocator = msp->ms_allocator;
search->ms_primary = msp->ms_primary;
}
return (msp);
}
static void
metaslab_active_mask_verify(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
return;
if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(!msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY3S(msp->ms_allocator, ==, -1);
return;
}
}
/* ARGSUSED */
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
metaslab_t *msp = NULL;
uint64_t offset = -1ULL;
uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
for (int i = 0; i < d; i++) {
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_SECONDARY;
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_CLAIM;
break;
}
}
/*
* If we don't have enough metaslabs active to fill the entire array, we
* just use the 0th slot.
*/
if (mg->mg_ms_ready < mg->mg_allocators * 3)
allocator = 0;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
search->ms_weight = UINT64_MAX;
search->ms_start = 0;
/*
* At the end of the metaslab tree are the already-active metaslabs,
* first the primaries, then the secondaries. When we resume searching
* through the tree, we need to consider ms_allocator and ms_primary so
* we start in the location right after where we left off, and don't
* accidentally loop forever considering the same metaslabs.
*/
search->ms_allocator = -1;
search->ms_primary = B_TRUE;
for (;;) {
boolean_t was_active = B_FALSE;
mutex_enter(&mg->mg_lock);
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
mga->mga_primary != NULL) {
msp = mga->mga_primary;
/*
* Even though we don't hold the ms_lock for the
* primary metaslab, those fields should not
* change while we hold the mg_lock. Thus it is
* safe to make assertions on them.
*/
ASSERT(msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
mga->mga_secondary != NULL) {
msp = mga->mga_secondary;
/*
* See comment above about the similar assertions
* for the primary metaslab.
*/
ASSERT(!msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else {
msp = find_valid_metaslab(mg, activation_weight, dva, d,
want_unique, asize, allocator, try_hard, zal,
search, &was_active);
}
mutex_exit(&mg->mg_lock);
if (msp == NULL) {
kmem_free(search, sizeof (*search));
return (-1ULL);
}
mutex_enter(&msp->ms_lock);
metaslab_active_mask_verify(msp);
/*
* This code is disabled because of issues with
* tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE3(ms__activation__attempt,
metaslab_t *, msp, uint64_t, activation_weight,
boolean_t, was_active);
#endif
/*
* Ensure that the metaslab we have selected is still
* capable of handling our request. It's possible that
* another thread may have changed the weight while we
* were blocked on the metaslab lock. We check the
* active status first to see if we need to select
* a new metaslab.
*/
if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
ASSERT3S(msp->ms_allocator, ==, -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If the metaslab was activated for another allocator
* while we were waiting in the ms_lock above, or it's
* a primary and we're seeking a secondary (or vice versa),
* we go back and select a new metaslab.
*/
if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
(msp->ms_allocator != -1) &&
(msp->ms_allocator != allocator || ((activation_weight ==
METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
ASSERT(msp->ms_loaded);
ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
msp->ms_allocator != -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* This metaslab was used for claiming regions allocated
* by the ZIL during pool import. Once these regions are
* claimed we don't need to keep the CLAIM bit set
* anymore. Passivate this metaslab to zero its activation
* mask.
*/
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
activation_weight != METASLAB_WEIGHT_CLAIM) {
ASSERT(msp->ms_loaded);
ASSERT3S(msp->ms_allocator, ==, -1);
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_WEIGHT_CLAIM);
mutex_exit(&msp->ms_lock);
continue;
}
metaslab_set_selected_txg(msp, txg);
int activation_error =
metaslab_activate(msp, allocator, activation_weight);
metaslab_active_mask_verify(msp);
/*
* If the metaslab was activated by another thread for
* another allocator or activation_weight (EBUSY), or it
* failed because another metaslab was assigned as primary
* for this allocator (EEXIST) we continue using this
* metaslab for our allocation, rather than going on to a
* worse metaslab (we waited for that metaslab to be loaded
* after all).
*
* If the activation failed due to an I/O error or ENOSPC we
* skip to the next metaslab.
*/
boolean_t activated;
if (activation_error == 0) {
activated = B_TRUE;
} else if (activation_error == EBUSY ||
activation_error == EEXIST) {
activated = B_FALSE;
} else {
mutex_exit(&msp->ms_lock);
continue;
}
ASSERT(msp->ms_loaded);
/*
* Now that we have the lock, recheck to see if we should
* continue to use this metaslab for this allocation. The
* metaslab is now loaded so metaslab_should_allocate()
* can accurately determine if the allocation attempt should
* proceed.
*/
if (!metaslab_should_allocate(msp, asize, try_hard)) {
/* Passivate this metaslab and select a new one. */
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
goto next;
}
/*
* If this metaslab is currently condensing then pick again
* as we can't manipulate this metaslab until it's committed
* to disk. If this metaslab is being initialized, we shouldn't
* allocate from it since the allocated region might be
* overwritten after allocation.
*/
if (msp->ms_condensing) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_CONDENSING, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
} else if (msp->ms_disabled > 0) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_DISABLED, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
}
offset = metaslab_block_alloc(msp, asize, txg);
metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
if (offset != -1ULL) {
/* Proactively passivate the metaslab, if needed */
if (activated)
metaslab_segment_may_passivate(msp);
break;
}
next:
ASSERT(msp->ms_loaded);
/*
* This code is disabled because of issues with
* tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
uint64_t, asize);
#endif
/*
* We were unable to allocate from this metaslab so determine
* a new weight for this metaslab. Now that we have loaded
* the metaslab we can provide a better hint to the metaslab
* selector.
*
* For space-based metaslabs, we use the maximum block size.
* This information is only available when the metaslab
* is loaded and is more accurate than the generic free
* space weight that was calculated by metaslab_weight().
* This information allows us to quickly compare the maximum
* available allocation in the metaslab to the allocation
* size being requested.
*
* For segment-based metaslabs, determine the new weight
* based on the highest bucket in the range tree. We
* explicitly use the loaded segment weight (i.e. the range
* tree histogram) since it contains the space that is
* currently available for allocation and is accurate
* even within a sync pass.
*/
uint64_t weight;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
weight = metaslab_largest_allocatable(msp);
WEIGHT_SET_SPACEBASED(weight);
} else {
weight = metaslab_weight_from_range_tree(msp);
}
if (activated) {
metaslab_passivate(msp, weight);
} else {
/*
* For the case where we use the metaslab that is
* active for another allocator we want to make
* sure that we retain the activation mask.
*
* Note that we could attempt to use something like
* metaslab_recalculate_weight_and_sort() that
* retains the activation mask here. That function
* uses metaslab_weight() to set the weight though
* which is not as accurate as the calculations
* above.
*/
weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(mg, msp, weight);
}
metaslab_active_mask_verify(msp);
/*
* We have just failed an allocation attempt, check
* that metaslab_should_allocate() agrees. Otherwise,
* we may end up in an infinite loop retrying the same
* metaslab.
*/
ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
mutex_exit(&msp->ms_lock);
}
mutex_exit(&msp->ms_lock);
kmem_free(search, sizeof (*search));
return (offset);
}
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
uint64_t offset;
ASSERT(mg->mg_initialized);
offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
dva, d, allocator, try_hard);
mutex_enter(&mg->mg_lock);
if (offset == -1ULL) {
mg->mg_failed_allocations++;
metaslab_trace_add(zal, mg, NULL, asize, d,
TRACE_GROUP_FAILURE, allocator);
if (asize == SPA_GANGBLOCKSIZE) {
/*
* This metaslab group was unable to allocate
* the minimum gang block size so it must be out of
* space. We must notify the allocation throttle
* to start skipping allocation attempts to this
* metaslab group until more space becomes available.
* Note: this failure cannot be caused by the
* allocation throttle since the allocation throttle
* is only responsible for skipping devices and
* not failing block allocations.
*/
mg->mg_no_free_space = B_TRUE;
}
}
mg->mg_allocations++;
mutex_exit(&mg->mg_lock);
return (offset);
}
/*
* Allocate a block for the specified i/o.
*/
int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
zio_alloc_list_t *zal, int allocator)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
metaslab_group_t *mg, *fast_mg, *rotor;
vdev_t *vd;
boolean_t try_hard = B_FALSE;
ASSERT(!DVA_IS_VALID(&dva[d]));
/*
* For testing, make some blocks above a certain size be gang blocks.
* This will result in more split blocks when using device removal,
* and a large number of split blocks coupled with ztest-induced
* damage can result in extremely long reconstruction times. This
* will also test spilling from special to normal.
*/
if (psize >= metaslab_force_ganging && (random_in_range(100) < 3)) {
metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
allocator);
return (SET_ERROR(ENOSPC));
}
/*
* Start at the rotor and loop through all mgs until we find something.
* Note that there's no locking on mca_rotor or mca_aliquot because
* nothing actually breaks if we miss a few updates -- we just won't
* allocate quite as evenly. It all balances out over time.
*
* If we are doing ditto or log blocks, try to spread them across
* consecutive vdevs. If we're forced to reuse a vdev before we've
* allocated all of our ditto blocks, then try and spread them out on
* that vdev as much as possible. If it turns out to not be possible,
* gradually lower our standards until anything becomes acceptable.
* Also, allocating on consecutive vdevs (as opposed to random vdevs)
* gives us hope of containing our fault domains to something we're
* able to reason about. Otherwise, any two top-level vdev failures
* will guarantee the loss of data. With consecutive allocation,
* only two adjacent top-level vdev failures will result in data loss.
*
* If we are doing gang blocks (hintdva is non-NULL), try to keep
* ourselves on the same vdev as our gang block header. That
* way, we can hope for locality in vdev_cache, plus it makes our
* fault domains something tractable.
*/
if (hintdva) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
/*
* It's possible the vdev we're using as the hint no
* longer exists or its mg has been closed (e.g. by
* device removal). Consult the rotor when
* all else fails.
*/
if (vd != NULL && vd->vdev_mg != NULL) {
mg = vdev_get_mg(vd, mc);
if (flags & METASLAB_HINTBP_AVOID &&
mg->mg_next != NULL)
mg = mg->mg_next;
} else {
mg = mca->mca_rotor;
}
} else if (d != 0) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
mg = vd->vdev_mg->mg_next;
} else if (flags & METASLAB_FASTWRITE) {
mg = fast_mg = mca->mca_rotor;
do {
if (fast_mg->mg_vd->vdev_pending_fastwrite <
mg->mg_vd->vdev_pending_fastwrite)
mg = fast_mg;
} while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor);
} else {
ASSERT(mca->mca_rotor != NULL);
mg = mca->mca_rotor;
}
/*
* If the hint put us into the wrong metaslab class, or into a
* metaslab group that has been passivated, just follow the rotor.
*/
if (mg->mg_class != mc || mg->mg_activation_count <= 0)
mg = mca->mca_rotor;
rotor = mg;
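/*
 * Remember the group we started from; the loop below makes exactly
 * one pass around the rotor and only restarts (at "top") if we decide
 * to try hard.
 */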
top:
do {
boolean_t allocatable;
ASSERT(mg->mg_activation_count == 1);
vd = mg->mg_vd;
/*
* Don't allocate from faulted devices.
*/
if (try_hard) {
spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
allocatable = vdev_allocatable(vd);
spa_config_exit(spa, SCL_ZIO, FTAG);
} else {
allocatable = vdev_allocatable(vd);
}
/*
* Determine if the selected metaslab group is eligible
* for allocations. If we're ganging then don't allow
* this metaslab group to skip allocations since that would
* inadvertently return ENOSPC and suspend the pool
* even though space is still available.
*/
if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
allocatable = metaslab_group_allocatable(mg, rotor,
psize, allocator, d);
}
if (!allocatable) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_NOT_ALLOCATABLE, allocator);
goto next;
}
ASSERT(mg->mg_initialized);
/*
* Avoid writing single-copy data to a failing,
* non-redundant vdev, unless we've already tried all
* other vdevs.
*/
if ((vd->vdev_stat.vs_write_errors > 0 ||
vd->vdev_state < VDEV_STATE_HEALTHY) &&
d == 0 && !try_hard && vd->vdev_children == 0) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_VDEV_ERROR, allocator);
goto next;
}
ASSERT(mg->mg_class == mc);
uint64_t asize = vdev_psize_to_asize(vd, psize);
ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
/*
* If we don't need to try hard, then require that the
* block be on a different metaslab from any other DVAs
* in this BP (unique=true). If we are trying hard, then
* allow any metaslab to be used (unique=false).
*/
uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
!try_hard, dva, d, allocator, try_hard);
if (offset != -1ULL) {
/*
* If we've just selected this metaslab group,
* figure out whether the corresponding vdev is
* over- or under-used relative to the pool,
* and set an allocation bias to even it out.
*
* Bias is also used to compensate for unequally
* sized vdevs so that space is allocated fairly.
*/
if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
vdev_stat_t *vs = &vd->vdev_stat;
int64_t vs_free = vs->vs_space - vs->vs_alloc;
int64_t mc_free = mc->mc_space - mc->mc_alloc;
int64_t ratio;
/*
* Calculate how much more or less we should
* try to allocate from this device during
* this iteration around the rotor.
*
* This basically introduces a zero-centered
* bias towards the devices with the most
* free space, while compensating for vdev
* size differences.
*
* Examples:
* vdev V1 = 16M/128M
* vdev V2 = 16M/128M
* ratio(V1) = 100% ratio(V2) = 100%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/128M
* ratio(V1) = 127% ratio(V2) = 72%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/512M
* ratio(V1) = 40% ratio(V2) = 160%
*/
ratio = (vs_free * mc->mc_alloc_groups * 100) /
(mc_free + 1);
mg->mg_bias = ((ratio - 100) *
(int64_t)mg->mg_aliquot) / 100;
} else if (!metaslab_bias_enabled) {
mg->mg_bias = 0;
}
if ((flags & METASLAB_FASTWRITE) ||
atomic_add_64_nv(&mca->mca_aliquot, asize) >=
mg->mg_aliquot + mg->mg_bias) {
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
}
DVA_SET_VDEV(&dva[d], vd->vdev_id);
DVA_SET_OFFSET(&dva[d], offset);
DVA_SET_GANG(&dva[d],
((flags & METASLAB_GANG_HEADER) ? 1 : 0));
DVA_SET_ASIZE(&dva[d], asize);
if (flags & METASLAB_FASTWRITE) {
atomic_add_64(&vd->vdev_pending_fastwrite,
psize);
}
return (0);
}
next:
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
} while ((mg = mg->mg_next) != rotor);
/*
* If we haven't tried hard, perhaps do so now.
*/
if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
psize <= 1 << spa->spa_min_ashift)) {
METASLABSTAT_BUMP(metaslabstat_try_hard);
try_hard = B_TRUE;
goto top;
}
bzero(&dva[d], sizeof (dva_t));
metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
return (SET_ERROR(ENOSPC));
}
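/*
 * Illustrative sketch, not part of the allocator: a self-contained userspace
 * reproduction of the rotor bias arithmetic used above.  The concrete vdev
 * sizes, the aliquot value, and the main() wrapper are hypothetical and only
 * show how mg_bias follows from the free-space ratio; the numbers correspond
 * to the 16M/128M vs 64M/128M example in the comment.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	int64_t vs_free = 112LL << 20;		/* V1 free: 128M - 16M */
	int64_t mc_free = 176LL << 20;		/* class free: V1 + V2 */
	int64_t alloc_groups = 2;		/* two top-level vdevs */
	int64_t aliquot = 512LL << 10;		/* hypothetical mg_aliquot */

	/* Same expressions as in metaslab_alloc_dva() above. */
	int64_t ratio = (vs_free * alloc_groups * 100) / (mc_free + 1);
	int64_t bias = ((ratio - 100) * aliquot) / 100;

	/* ratio > 100%: an emptier-than-average vdev gets a positive bias. */
	printf("ratio=%lld%% bias=%lld bytes\n",
	    (long long)ratio, (long long)bias);
	return (0);
}
#endif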
void
metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
boolean_t checkpoint)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
ASSERT(vdev_is_concrete(vd));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
metaslab_check_free_impl(vd, offset, asize);
mutex_enter(&msp->ms_lock);
if (range_tree_is_empty(msp->ms_freeing) &&
range_tree_is_empty(msp->ms_checkpointing)) {
vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
}
if (checkpoint) {
ASSERT(spa_has_checkpoint(spa));
range_tree_add(msp->ms_checkpointing, offset, asize);
} else {
range_tree_add(msp->ms_freeing, offset, asize);
}
mutex_exit(&msp->ms_lock);
}
/* ARGSUSED */
void
metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
boolean_t *checkpoint = arg;
ASSERT3P(checkpoint, !=, NULL);
if (vd->vdev_ops->vdev_op_remap != NULL)
vdev_indirect_mark_obsolete(vd, offset, size);
else
metaslab_free_impl(vd, offset, size, *checkpoint);
}
static void
metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
boolean_t checkpoint)
{
spa_t *spa = vd->vdev_spa;
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
return;
if (spa->spa_vdev_removal != NULL &&
spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
vdev_is_concrete(vd)) {
/*
* Note: we check if the vdev is concrete because when
* we complete the removal, we first change the vdev to be
* an indirect vdev (in open context), and then (in syncing
* context) clear spa_vdev_removal.
*/
free_from_removing_vdev(vd, offset, size);
} else if (vd->vdev_ops->vdev_op_remap != NULL) {
vdev_indirect_mark_obsolete(vd, offset, size);
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_free_impl_cb, &checkpoint);
} else {
metaslab_free_concrete(vd, offset, size, checkpoint);
}
}
typedef struct remap_blkptr_cb_arg {
blkptr_t *rbca_bp;
spa_remap_cb_t rbca_cb;
vdev_t *rbca_remap_vd;
uint64_t rbca_remap_offset;
void *rbca_cb_arg;
} remap_blkptr_cb_arg_t;
static void
remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
remap_blkptr_cb_arg_t *rbca = arg;
blkptr_t *bp = rbca->rbca_bp;
/* We can not remap split blocks. */
if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
return;
ASSERT0(inner_offset);
if (rbca->rbca_cb != NULL) {
/*
* At this point we know that we are not handling split
* blocks and we invoke the callback on the previous
* vdev which must be indirect.
*/
ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
/* set up remap_blkptr_cb_arg for the next call */
rbca->rbca_remap_vd = vd;
rbca->rbca_remap_offset = offset;
}
/*
* The phys birth time is that of dva[0]. This ensures that we know
* when each dva was written, so that resilver can determine which
* blocks need to be scrubbed (i.e. those written during the time
* the vdev was offline). It also ensures that the key used in
* the ARC hash table is unique (i.e. dva[0] + phys_birth). If
* we didn't change the phys_birth, a lookup in the ARC for a
* remapped BP could find the data that was previously stored at
* this vdev + offset.
*/
vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
DVA_GET_VDEV(&bp->blk_dva[0]));
vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
DVA_SET_OFFSET(&bp->blk_dva[0], offset);
}
/*
* If the block pointer contains any indirect DVAs, modify them to refer to
* concrete DVAs. Note that this will sometimes not be possible, leaving
* the indirect DVA in place. This happens if the indirect DVA spans multiple
* segments in the mapping (i.e. it is a "split block").
*
* If the BP was remapped, calls the callback on the original dva (note the
* callback can be called multiple times if the original indirect DVA refers
* to another indirect DVA, etc).
*
* Returns TRUE if the BP was remapped.
*/
boolean_t
spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
remap_blkptr_cb_arg_t rbca;
if (!zfs_remap_blkptr_enable)
return (B_FALSE);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
return (B_FALSE);
/*
* Dedup BP's can not be remapped, because ddt_phys_select() depends
* on DVA[0] being the same in the BP as in the DDT (dedup table).
*/
if (BP_GET_DEDUP(bp))
return (B_FALSE);
/*
* Gang blocks can not be remapped, because
* zio_checksum_gang_verifier() depends on the DVA[0] that's in
* the BP used to read the gang block header (GBH) being the same
* as the DVA[0] that we allocated for the GBH.
*/
if (BP_IS_GANG(bp))
return (B_FALSE);
/*
* Embedded BP's have no DVA to remap.
*/
if (BP_GET_NDVAS(bp) < 1)
return (B_FALSE);
/*
* Note: we only remap dva[0]. If we remapped other dvas, we
* would no longer know what their phys birth txg is.
*/
dva_t *dva = &bp->blk_dva[0];
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops->vdev_op_remap == NULL)
return (B_FALSE);
rbca.rbca_bp = bp;
rbca.rbca_cb = callback;
rbca.rbca_remap_vd = vd;
rbca.rbca_remap_offset = offset;
rbca.rbca_cb_arg = arg;
/*
* remap_blkptr_cb() will be called in order for each level of
* indirection, until a concrete vdev is reached or a split block is
* encountered. rbca_remap_vd and rbca_remap_offset are updated within
* the callback as we go from one indirect vdev to the next (either
* concrete or indirect again) in that order.
*/
vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
/* Check if the DVA wasn't remapped because it is a split block */
if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
return (B_FALSE);
return (B_TRUE);
}
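/*
 * Illustrative sketch: a caller-side callback wired to spa_remap_blkptr().
 * The argument order (vdev id, offset, size, arg) is taken from the rbca_cb
 * invocation in remap_blkptr_cb() above; the callback body and the
 * remap_and_count() wrapper are hypothetical and only demonstrate that the
 * callback fires once per remapped level of indirection.
 */
#if 0
typedef struct remap_count {
	uint64_t rc_levels;
} remap_count_t;

static void
count_remaps_cb(uint64_t vdev_id, uint64_t offset, uint64_t size, void *arg)
{
	remap_count_t *rc = arg;

	(void) vdev_id;
	(void) offset;
	(void) size;
	rc->rc_levels++;	/* one call per indirect level traversed */
}

static boolean_t
remap_and_count(spa_t *spa, blkptr_t *bp, uint64_t *levels)
{
	remap_count_t rc = { 0 };
	boolean_t remapped = spa_remap_blkptr(spa, bp, count_remaps_cb, &rc);

	*levels = rc.rc_levels;
	return (remapped);
}
#endif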
/*
* Undo the allocation of a DVA which happened in the given transaction group.
*/
void
metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
metaslab_t *msp;
vdev_t *vd;
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (txg > spa_freeze_txg(spa))
return;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
(offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
(u_longlong_t)vdev, (u_longlong_t)offset,
(u_longlong_t)size);
return;
}
ASSERT(!vd->vdev_removing);
ASSERT(vdev_is_concrete(vd));
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total -= size;
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
range_tree_add(msp->ms_allocatable, offset, size);
mutex_exit(&msp->ms_lock);
}
/*
* Free the block represented by the given DVA.
*/
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, vdev);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (DVA_GET_GANG(dva)) {
size = vdev_gang_header_asize(vd);
}
metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
* Reserve some allocation slots. The reservation system must be called
* before we call into the allocator. If there aren't any available slots
* then the I/O will be throttled until an I/O completes and its slots are
* freed up. The function returns true if it was successful in placing
* the reservation.
*/
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
zio_t *zio, int flags)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
uint64_t max = mca->mca_alloc_max_slots;
ASSERT(mc->mc_alloc_throttle_enabled);
if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
/*
+ * The potential race between _count() and _add() is covered
+ * by the allocator lock in most cases, or irrelevant because
+ * GANG_ALLOCATION() or METASLAB_MUST_RESERVE is set in others.
+ * But even if we assume some other non-existent scenario, the
+ * worst that can happen is that a few more I/Os reach the
+ * allocator earlier, which is not a problem.
+ *
* We reserve the slots individually so that we can unreserve
* them individually when an I/O completes.
*/
for (int d = 0; d < slots; d++)
zfs_refcount_add(&mca->mca_alloc_slots, zio);
zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
return (B_TRUE);
}
return (B_FALSE);
}
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
int allocator, zio_t *zio)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
ASSERT(mc->mc_alloc_throttle_enabled);
for (int d = 0; d < slots; d++)
zfs_refcount_remove(&mca->mca_alloc_slots, zio);
}
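/*
 * Illustrative sketch: how the reserve/unreserve pair above is meant to
 * bracket an allocating I/O.  This compressed flow is hypothetical (the
 * real callers live in the ZIO pipeline and unreserve on I/O completion);
 * metaslab_trace_init/fini, the txg choice, and the single-slot assumption
 * are used purely for illustration.
 */
#if 0
static int
throttled_alloc_sketch(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    blkptr_t *bp, zio_t *zio, int allocator, int flags)
{
	zio_alloc_list_t zal;
	int slots = 1;			/* one slot per DVA in this sketch */
	int error;

	if (!metaslab_class_throttle_reserve(mc, slots, allocator, zio, flags))
		return (SET_ERROR(EAGAIN));	/* caller requeues the I/O */

	metaslab_trace_init(&zal);
	error = metaslab_alloc(spa, mc, psize, bp, 1, spa_syncing_txg(spa),
	    NULL, flags, &zal, zio, allocator);
	metaslab_trace_fini(&zal);

	/*
	 * On success the slots stay reserved until the I/O completes, at
	 * which point metaslab_class_throttle_unreserve() releases them.
	 */
	if (error != 0)
		metaslab_class_throttle_unreserve(mc, slots, allocator, zio);
	return (error);
}
#endif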
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
uint64_t txg)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
int error = 0;
if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
return (SET_ERROR(ENXIO));
ASSERT3P(vd->vdev_ms, !=, NULL);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
if (error == EBUSY) {
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
error = 0;
}
}
if (error == 0 &&
!range_tree_contains(msp->ms_allocatable, offset, size))
error = SET_ERROR(ENOENT);
if (error || txg == 0) { /* txg == 0 indicates dry run */
mutex_exit(&msp->ms_lock);
return (error);
}
VERIFY(!msp->ms_condensing);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
msp->ms_size);
range_tree_remove(msp->ms_allocatable, offset, size);
range_tree_clear(msp->ms_trim, offset, size);
if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (!multilist_link_active(&msp->ms_class_txg_node)) {
msp->ms_selected_txg = txg;
multilist_sublist_insert_head(mls, msp);
}
multilist_sublist_unlock(mls);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total += size;
}
mutex_exit(&msp->ms_lock);
return (0);
}
typedef struct metaslab_claim_cb_arg_t {
uint64_t mcca_txg;
int mcca_error;
} metaslab_claim_cb_arg_t;
/* ARGSUSED */
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
metaslab_claim_cb_arg_t *mcca_arg = arg;
if (mcca_arg->mcca_error == 0) {
mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
size, mcca_arg->mcca_txg);
}
}
int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
if (vd->vdev_ops->vdev_op_remap != NULL) {
metaslab_claim_cb_arg_t arg;
/*
* Only zdb(8) can claim on indirect vdevs. This is used
* to detect leaks of mapped space (that are not accounted
* for in the obsolete counts, spacemap, or bpobj).
*/
ASSERT(!spa_writeable(vd->vdev_spa));
arg.mcca_error = 0;
arg.mcca_txg = txg;
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_claim_impl_cb, &arg);
if (arg.mcca_error == 0) {
arg.mcca_error = metaslab_claim_concrete(vd,
offset, size, txg);
}
return (arg.mcca_error);
} else {
return (metaslab_claim_concrete(vd, offset, size, txg));
}
}
/*
* Intent log support: upon opening the pool after a crash, notify the SPA
* of blocks that the intent log has allocated for immediate write, but
* which are still considered free by the SPA because the last transaction
* group didn't commit yet.
*/
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
return (SET_ERROR(ENXIO));
}
ASSERT(DVA_IS_VALID(dva));
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
return (metaslab_claim_impl(vd, offset, size, txg));
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
dva_t *dva = bp->blk_dva;
dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
int error = 0;
ASSERT(bp->blk_birth == 0);
ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
if (mc->mc_allocator[allocator].mca_rotor == NULL) {
/* no vdevs in this class */
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (SET_ERROR(ENOSPC));
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
ASSERT(BP_GET_NDVAS(bp) == 0);
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
ASSERT3P(zal, !=, NULL);
for (int d = 0; d < ndvas; d++) {
error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
txg, flags, zal, allocator);
if (error != 0) {
for (d--; d >= 0; d--) {
metaslab_unalloc_dva(spa, &dva[d], txg);
metaslab_group_alloc_decrement(spa,
DVA_GET_VDEV(&dva[d]), zio, flags,
allocator, B_FALSE);
bzero(&dva[d], sizeof (dva_t));
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (error);
} else {
/*
* Update the metaslab group's queue depth
* based on the newly allocated dva.
*/
metaslab_group_alloc_increment(spa,
DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
}
}
ASSERT(error == 0);
ASSERT(BP_GET_NDVAS(bp) == ndvas);
spa_config_exit(spa, SCL_ALLOC, FTAG);
BP_SET_BIRTH(bp, txg, 0);
return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
/*
* If we have a checkpoint for the pool we need to make sure that
* the blocks that we free that are part of the checkpoint won't be
* reused until the checkpoint is discarded or we revert to it.
*
* The checkpoint flag is passed down the metaslab_free code path
* and is set whenever we want to add a block to the checkpoint's
* accounting. That is, we "checkpoint" blocks that existed at the
* time the checkpoint was created and are therefore referenced by
* the checkpointed uberblock.
*
* Note that we don't checkpoint any blocks if the current
* syncing txg <= spa_checkpoint_txg. We want these frees to sync
* normally as they will be referenced by the checkpointed uberblock.
*/
boolean_t checkpoint = B_FALSE;
if (bp->blk_birth <= spa->spa_checkpoint_txg &&
spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
/*
* At this point, if the block is part of the checkpoint
* there is no way it was created in the current txg.
*/
ASSERT(!now);
ASSERT3U(spa_syncing_txg(spa), ==, txg);
checkpoint = B_TRUE;
}
spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
if (now) {
metaslab_unalloc_dva(spa, &dva[d], txg);
} else {
ASSERT3U(txg, ==, spa_syncing_txg(spa));
metaslab_free_dva(spa, &dva[d], checkpoint);
}
}
spa_config_exit(spa, SCL_FREE, FTAG);
}
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
int error = 0;
ASSERT(!BP_IS_HOLE(bp));
if (txg != 0) {
/*
* First do a dry run to make sure all DVAs are claimable,
* so we don't have to unwind from partial failures below.
*/
if ((error = metaslab_claim(spa, bp, 0)) != 0)
return (error);
}
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
error = metaslab_claim_dva(spa, &dva[d], txg);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
ASSERT(error == 0 || txg == 0);
return (error);
}
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
uint64_t psize = BP_GET_PSIZE(bp);
int d;
vdev_t *vd;
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(psize > 0);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (d = 0; d < ndvas; d++) {
if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
continue;
atomic_add_64(&vd->vdev_pending_fastwrite, psize);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
uint64_t psize = BP_GET_PSIZE(bp);
int d;
vdev_t *vd;
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(psize > 0);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (d = 0; d < ndvas; d++) {
if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
continue;
ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
/* ARGSUSED */
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
if (vd->vdev_ops == &vdev_indirect_ops)
return;
metaslab_check_free_impl(vd, offset, size);
}
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
metaslab_t *msp;
spa_t *spa __maybe_unused = vd->vdev_spa;
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
if (vd->vdev_ops->vdev_op_remap != NULL) {
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_check_free_impl_cb, NULL);
return;
}
ASSERT(vdev_is_concrete(vd));
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if (msp->ms_loaded) {
range_tree_verify_not_present(msp->ms_allocatable,
offset, size);
}
/*
* Check all segments that currently exist in the freeing pipeline.
*
* It would intuitively make sense to also check the current allocating
* tree since metaslab_unalloc_dva() exists for extents that are
* allocated and freed in the same sync pass within the same txg.
* Unfortunately there are places (e.g. the ZIL) where we allocate a
* segment but then we free part of it within the same txg
* [see zil_sync()]. Thus, we don't call range_tree_verify() in the
* current allocating tree.
*/
range_tree_verify_not_present(msp->ms_freeing, offset, size);
range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
range_tree_verify_not_present(msp->ms_freed, offset, size);
for (int j = 0; j < TXG_DEFER_SIZE; j++)
range_tree_verify_not_present(msp->ms_defer[j], offset, size);
range_tree_verify_not_present(msp->ms_trim, offset, size);
mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
if (DVA_GET_GANG(&bp->blk_dva[i]))
size = vdev_gang_header_asize(vd);
ASSERT3P(vd, !=, NULL);
metaslab_check_free_impl(vd, offset, size);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_group_disable_wait(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
while (mg->mg_disabled_updating) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
}
static void
metaslab_group_disabled_increment(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
ASSERT(mg->mg_disabled_updating);
while (mg->mg_ms_disabled >= max_disabled_ms) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
mg->mg_ms_disabled++;
ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
}
/*
* Mark the metaslab as disabled to prevent any allocations on this metaslab.
* We must also track how many metaslabs are currently disabled within a
* metaslab group and limit them to prevent allocation failures from
* occurring because all metaslabs are disabled.
*/
void
metaslab_disable(metaslab_t *msp)
{
ASSERT(!MUTEX_HELD(&msp->ms_lock));
metaslab_group_t *mg = msp->ms_group;
mutex_enter(&mg->mg_ms_disabled_lock);
/*
* To keep an accurate count of how many threads have disabled
* a specific metaslab group, we only allow one thread to mark
* the metaslab group at a time. This ensures that the value of
* ms_disabled will be accurate when we decide to mark a metaslab
* group as disabled. To do this we force all other threads
* to wait until the metaslab group's mg_disabled_updating flag
* is no longer set.
*/
metaslab_group_disable_wait(mg);
mg->mg_disabled_updating = B_TRUE;
if (msp->ms_disabled == 0) {
metaslab_group_disabled_increment(mg);
}
mutex_enter(&msp->ms_lock);
msp->ms_disabled++;
mutex_exit(&msp->ms_lock);
mg->mg_disabled_updating = B_FALSE;
cv_broadcast(&mg->mg_ms_disabled_cv);
mutex_exit(&mg->mg_ms_disabled_lock);
}
void
metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
/*
* Wait for the outstanding IO to be synced to prevent newly
* allocated blocks from being overwritten. This is used by
* initialize and TRIM, which modify unallocated space.
*/
if (sync)
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&mg->mg_ms_disabled_lock);
mutex_enter(&msp->ms_lock);
if (--msp->ms_disabled == 0) {
mg->mg_ms_disabled--;
cv_broadcast(&mg->mg_ms_disabled_cv);
if (unload)
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
mutex_exit(&mg->mg_ms_disabled_lock);
}
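/*
 * Illustrative sketch: the disable/enable pair above bracketing work that
 * modifies unallocated space (the real users are vdev_trim and
 * vdev_initialize); the function below is hypothetical.
 */
#if 0
static void
touch_free_space_sketch(metaslab_t *msp)
{
	metaslab_disable(msp);		/* block new allocations from msp */

	/* ... issue TRIM/initialize I/O against unallocated ranges ... */

	/*
	 * sync=B_TRUE waits for outstanding allocations to sync out before
	 * re-enabling; unload=B_FALSE keeps the metaslab's range trees
	 * loaded in memory.
	 */
	metaslab_enable(msp, B_TRUE, B_FALSE);
}
#endif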
static void
metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
{
vdev_t *vd = ms->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
metaslab_unflushed_phys_t entry = {
.msp_unflushed_txg = metaslab_unflushed_txg(ms),
};
uint64_t entry_size = sizeof (entry);
uint64_t entry_offset = ms->ms_id * entry_size;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object);
if (err == ENOENT) {
object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
VERIFY0(zap_add(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object, tx));
} else {
VERIFY0(err);
}
dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
&entry, tx);
}
void
metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
{
spa_t *spa = ms->ms_group->mg_vd->vdev_spa;
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
ms->ms_unflushed_txg = txg;
metaslab_update_ondisk_flush_data(ms, tx);
}
uint64_t
metaslab_unflushed_txg(metaslab_t *ms)
{
return (ms->ms_unflushed_txg);
}
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, ULONG, ZMOD_RW,
"Allocation granularity (a.k.a. stripe size)");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
"Load all metaslabs when pool is first opened");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
"Prevent metaslabs from being unloaded");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
"Preload potential metaslabs during reassessment");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, INT, ZMOD_RW,
"Delay in txgs after metaslab was last used before unloading");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, INT, ZMOD_RW,
"Delay in milliseconds after metaslab was last used before unloading");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, INT, ZMOD_RW,
"Percentage of metaslab group size that should be free to make it "
"eligible for allocation");
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, INT, ZMOD_RW,
"Percentage of metaslab group size that should be considered eligible "
"for allocations unless all metaslab groups within the metaslab class "
"have also crossed this threshold");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, INT,
ZMOD_RW, "Fragmentation for metaslab to allow allocation");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT, ZMOD_RW,
"Use the fragmentation metric to prefer less fragmented metaslabs");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
"Prefer metaslabs with lower LBAs");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
"Enable metaslab group biasing");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
ZMOD_RW, "Enable segment-based metaslab selection");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
"Segment-based metaslab selection maximum buckets before switching");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW,
"Blocks larger than this size are forced to be gang blocks");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, INT, ZMOD_RW,
"Max distance (bytes) to search forward before using size tree");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
"When looking in size tree, use largest segment instead of exact fit");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG,
ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, INT, ZMOD_RW,
"Percentage of memory that can be used to store metaslab range trees");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
ZMOD_RW, "Try hard to allocate before ganging");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, INT, ZMOD_RW,
"Normally only consider this many of the best metaslabs in each vdev");
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index 2a4db7d562b6..8ca9b49ba3b3 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -1,9950 +1,9950 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2018 Joyent, Inc.
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
/*
* SPA: Storage Pool Allocator
*
* This file contains all the routines used when modifying on-disk SPA state.
* This includes opening, importing, destroying, exporting a pool, and syncing a
* pool.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_draid.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>
#ifdef _KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/callb.h>
#include <sys/zone.h>
#include <sys/vmsystm.h>
#endif /* _KERNEL */
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* The interval, in seconds, at which failed configuration cache file writes
* should be retried.
*/
int zfs_ccw_retry_interval = 300;
typedef enum zti_modes {
ZTI_MODE_FIXED, /* value is # of threads (min 1) */
ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
ZTI_MODE_SCALE, /* Taskqs scale with CPUs. */
ZTI_MODE_NULL, /* don't create a taskq */
ZTI_NMODES
} zti_modes_t;
#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
#define ZTI_SCALE { ZTI_MODE_SCALE, 0, 1 }
#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
#define ZTI_N(n) ZTI_P(n, 1)
#define ZTI_ONE ZTI_N(1)
typedef struct zio_taskq_info {
zti_modes_t zti_mode;
uint_t zti_value;
uint_t zti_count;
} zio_taskq_info_t;
static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
"iss", "iss_h", "int", "int_h"
};
/*
* This table defines the taskq settings for each ZFS I/O type. When
* initializing a pool, we use this table to create an appropriately sized
* taskq. Some operations are low volume and therefore have a small, static
* number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
* macros. Other operations process a large amount of data; the ZTI_BATCH
* macro causes us to create a taskq oriented for throughput. Some operations
* are so high frequency and short-lived that the taskq itself can become a
* point of lock contention. The ZTI_P(#, #) macro indicates that we need an
* additional degree of parallelism specified by the number of threads per-
* taskq and the number of taskqs; when dispatching an event in this case, the
* particular taskq is chosen at random. ZTI_SCALE is similar to ZTI_BATCH,
* but with number of taskqs also scaling with number of CPUs.
*
* The different taskq priorities are to handle the different contexts (issue
* and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
* need to be handled with minimum delay.
*/
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
{ ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* READ */
{ ZTI_BATCH, ZTI_N(5), ZTI_SCALE, ZTI_N(5) }, /* WRITE */
{ ZTI_SCALE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
{ ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */
};
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);
uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */
uint_t zio_taskq_batch_tpq; /* threads per taskq */
boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
uint_t zio_taskq_basedc = 80; /* base duty cycle */
boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
/*
* Report any spa_load_verify errors found, but do not fail spa_load.
* This is used by zdb to analyze non-idle pools.
*/
boolean_t spa_load_verify_dryrun = B_FALSE;
/*
* This (illegal) pool name is used when temporarily importing a spa_t in order
* to get the vdev stats associated with the imported devices.
*/
#define TRYIMPORT_NAME "$import"
/*
* For debugging purposes: print out vdev tree during pool import.
*/
int spa_load_print_vdev_tree = B_FALSE;
/*
* A non-zero value for zfs_max_missing_tvds means that we allow importing
* pools with missing top-level vdevs. This is strictly intended for advanced
* pool recovery cases since missing data is almost inevitable. Pools with
* missing devices can only be imported read-only for safety reasons, and their
* fail-mode will be automatically set to "continue".
*
* With 1 missing vdev we should be able to import the pool and mount all
* datasets. User data that was not modified after the missing device has been
* added should be recoverable. This means that snapshots created prior to the
* addition of that device should be completely intact.
*
* With 2 missing vdevs, some datasets may fail to mount since there are
* dataset statistics that are stored as regular metadata. Some data might be
* recoverable if those vdevs were added recently.
*
* With 3 or more missing vdevs, the pool is severely damaged and MOS entries
* may be missing entirely. Chances of data recovery are very low. Note that
* there are also risks of performing an inadvertent rewind as we might be
* missing all the vdevs with the latest uberblocks.
*/
unsigned long zfs_max_missing_tvds = 0;
/*
* The parameters below are similar to zfs_max_missing_tvds but are only
* intended for a preliminary open of the pool with an untrusted config which
* might be incomplete or outdated.
*
* We are more tolerant of pools opened from a cachefile since we could have
* an outdated cachefile where a device removal was not registered.
* We could have set the limit arbitrarily high but in the case where devices
* are really missing we would want to return the proper error codes; we chose
* SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
* and we get a chance to retrieve the trusted config.
*/
uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
/*
* In the case where config was assembled by scanning device paths (/dev/dsks
* by default) we are less tolerant since all the existing devices should have
* been detected and we want spa_load to return the right error codes.
*/
uint64_t zfs_max_missing_tvds_scan = 0;
/*
* Debugging aid that pauses spa_sync() towards the end.
*/
boolean_t zfs_pause_spa_sync = B_FALSE;
/*
* Variables to indicate the livelist condense zthr func should wait at certain
* points for the livelist to be removed - used to test condense/destroy races
*/
int zfs_livelist_condense_zthr_pause = 0;
int zfs_livelist_condense_sync_pause = 0;
/*
* Variables to track whether or not condense cancellation has been
* triggered in testing.
*/
int zfs_livelist_condense_sync_cancel = 0;
int zfs_livelist_condense_zthr_cancel = 0;
/*
* Variable to track whether or not extra ALLOC blkptrs were added to a
* livelist entry while it was being condensed (caused by the way we track
* remapped blkptrs in dbuf_remap_impl)
*/
int zfs_livelist_condense_new_alloc = 0;
/*
* ==========================================================================
* SPA properties routines
* ==========================================================================
*/
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
uint64_t intval, zprop_source_t src)
{
const char *propname = zpool_prop_to_name(prop);
nvlist_t *propval;
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
if (strval != NULL)
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
else
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
nvlist_free(propval);
}
/*
* Get property values from the spa configuration.
*/
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
uint64_t size, alloc, cap, version;
const zprop_source_t src = ZPROP_SRC_NONE;
spa_config_dirent_t *dp;
metaslab_class_t *mc = spa_normal_class(spa);
ASSERT(MUTEX_HELD(&spa->spa_props_lock));
if (rvd != NULL) {
alloc = metaslab_class_get_alloc(mc);
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
size = metaslab_class_get_space(mc);
size += metaslab_class_get_space(spa_special_class(spa));
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
spa->spa_checkpoint_info.sci_dspace, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
metaslab_class_fragmentation(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
metaslab_class_expandable_space(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == SPA_MODE_READ), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
ddt_get_pool_dedup_ratio(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_DEFAULT);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_LOCAL);
}
spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
NULL, spa_load_guid(spa), src);
}
if (pool != NULL) {
/*
* The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
* when opening pools created before this version, freedir will be NULL.
*/
if (pool->dp_free_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
if (pool->dp_leak_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
NULL, 0, src);
}
}
spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
if (spa->spa_comment != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
0, ZPROP_SRC_LOCAL);
}
if (spa->spa_compatibility != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
}
if (spa->spa_root != NULL)
spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MAX_SIZE, ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MIN_SIZE, ZPROP_SRC_NONE);
}
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
"none", 0, ZPROP_SRC_LOCAL);
} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
dp->scd_path, 0, ZPROP_SRC_LOCAL);
}
}
}
/*
* Get zpool property values.
*/
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
objset_t *mos = spa->spa_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
dsl_pool_t *dp;
int err;
err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
if (err)
return (err);
dp = spa_get_dsl(spa);
dsl_pool_config_enter(dp, FTAG);
mutex_enter(&spa->spa_props_lock);
/*
* Get properties from the spa config.
*/
spa_prop_get_config(spa, nvp);
/* If no pool property object, no more prop to get. */
if (mos == NULL || spa->spa_pool_props_object == 0)
goto out;
/*
* Get properties from the MOS pool property object.
*/
for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t intval = 0;
char *strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
zpool_prop_t prop;
if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
continue;
switch (za.za_integer_length) {
case 8:
/* integer property */
if (za.za_first_integer !=
zpool_prop_default_numeric(prop))
src = ZPROP_SRC_LOCAL;
if (prop == ZPOOL_PROP_BOOTFS) {
dsl_dataset_t *ds = NULL;
err = dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &ds);
if (err != 0)
break;
strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
dsl_dataset_name(ds, strval);
dsl_dataset_rele(ds, FTAG);
} else {
strval = NULL;
intval = za.za_first_integer;
}
spa_prop_add_list(*nvp, prop, strval, intval, src);
if (strval != NULL)
kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
err = zap_lookup(mos, spa->spa_pool_props_object,
za.za_name, 1, za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
spa_prop_add_list(*nvp, prop, strval, 0, src);
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
out:
mutex_exit(&spa->spa_props_lock);
dsl_pool_config_exit(dp, FTAG);
if (err && err != ENOENT) {
nvlist_free(*nvp);
*nvp = NULL;
return (err);
}
return (0);
}
/*
* Validate the given pool properties nvlist and modify the list
* for the property values to be set.
*/
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
nvpair_t *elem;
int error = 0, reset_bootfs = 0;
uint64_t objnum = 0;
boolean_t has_feature = B_FALSE;
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
uint64_t intval;
char *strval, *slash, *check, *fname;
const char *propname = nvpair_name(elem);
zpool_prop_t prop = zpool_name_to_prop(propname);
switch (prop) {
case ZPOOL_PROP_INVAL:
if (!zpool_prop_feature(propname)) {
error = SET_ERROR(EINVAL);
break;
}
/*
* Sanitize the input.
*/
if (nvpair_type(elem) != DATA_TYPE_UINT64) {
error = SET_ERROR(EINVAL);
break;
}
if (nvpair_value_uint64(elem, &intval) != 0) {
error = SET_ERROR(EINVAL);
break;
}
if (intval != 0) {
error = SET_ERROR(EINVAL);
break;
}
fname = strchr(propname, '@') + 1;
if (zfeature_lookup_name(fname, NULL) != 0) {
error = SET_ERROR(EINVAL);
break;
}
has_feature = B_TRUE;
break;
case ZPOOL_PROP_VERSION:
error = nvpair_value_uint64(elem, &intval);
if (!error &&
(intval < spa_version(spa) ||
intval > SPA_VERSION_BEFORE_FEATURES ||
has_feature))
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_DELEGATION:
case ZPOOL_PROP_AUTOREPLACE:
case ZPOOL_PROP_LISTSNAPS:
case ZPOOL_PROP_AUTOEXPAND:
case ZPOOL_PROP_AUTOTRIM:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_MULTIHOST:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
if (!error) {
uint32_t hostid = zone_get_hostid(NULL);
if (hostid)
spa->spa_hostid = hostid;
else
error = SET_ERROR(ENOTSUP);
}
break;
case ZPOOL_PROP_BOOTFS:
/*
* If the pool version is less than SPA_VERSION_BOOTFS,
* or the pool is still being created (version == 0),
* the bootfs property cannot be set.
*/
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = SET_ERROR(ENOTSUP);
break;
}
/*
* Make sure the vdev config is bootable
*/
if (!vdev_is_bootable(spa->spa_root_vdev)) {
error = SET_ERROR(ENOTSUP);
break;
}
reset_bootfs = 1;
error = nvpair_value_string(elem, &strval);
if (!error) {
objset_t *os;
if (strval == NULL || strval[0] == '\0') {
objnum = zpool_prop_default_numeric(
ZPOOL_PROP_BOOTFS);
break;
}
error = dmu_objset_hold(strval, FTAG, &os);
if (error != 0)
break;
/* Must be ZPL. */
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
dmu_objset_rele(os, FTAG);
}
break;
case ZPOOL_PROP_FAILUREMODE:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > ZIO_FAILURE_MODE_PANIC)
error = SET_ERROR(EINVAL);
/*
* This is a special case which only occurs when
* the pool has completely failed. This allows
* the user to change the in-core failmode property
* without syncing it out to disk (I/Os might
* currently be blocked). We do this by returning
* EIO to the caller (spa_prop_set) to trick it
* into thinking we encountered a property validation
* error.
*/
if (!error && spa_suspended(spa)) {
spa->spa_failmode = intval;
error = SET_ERROR(EIO);
}
break;
case ZPOOL_PROP_CACHEFILE:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
error = SET_ERROR(EINVAL);
break;
}
slash = strrchr(strval, '/');
ASSERT(slash != NULL);
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_COMMENT:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
error = SET_ERROR(EINVAL);
break;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT)
error = SET_ERROR(E2BIG);
break;
default:
break;
}
if (error)
break;
}
(void) nvlist_remove_all(props,
zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
if (!error && reset_bootfs) {
error = nvlist_remove(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
if (!error) {
error = nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
}
}
return (error);
}
void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
char *cachefile;
spa_config_dirent_t *dp;
if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
&cachefile) != 0)
return;
dp = kmem_alloc(sizeof (spa_config_dirent_t),
KM_SLEEP);
if (cachefile[0] == '\0')
dp->scd_path = spa_strdup(spa_config_path);
else if (strcmp(cachefile, "none") == 0)
dp->scd_path = NULL;
else
dp->scd_path = spa_strdup(cachefile);
list_insert_head(&spa->spa_config_list, dp);
if (need_sync)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
int error;
nvpair_t *elem = NULL;
boolean_t need_sync = B_FALSE;
if ((error = spa_prop_validate(spa, nvp)) != 0)
return (error);
while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
if (prop == ZPOOL_PROP_CACHEFILE ||
prop == ZPOOL_PROP_ALTROOT ||
prop == ZPOOL_PROP_READONLY)
continue;
if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
uint64_t ver;
if (prop == ZPOOL_PROP_VERSION) {
VERIFY(nvpair_value_uint64(elem, &ver) == 0);
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
need_sync = B_TRUE;
}
/* Save time if the version is already set. */
if (ver == spa_version(spa))
continue;
/*
* In addition to the pool directory object, we might
* create the pool properties object, the features for
* read object, the features for write object, or the
* feature descriptions object.
*/
error = dsl_sync_task(spa->spa_name, NULL,
spa_sync_version, &ver,
6, ZFS_SPACE_CHECK_RESERVED);
if (error)
return (error);
continue;
}
need_sync = B_TRUE;
break;
}
if (need_sync) {
return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
nvp, 6, ZFS_SPACE_CHECK_RESERVED));
}
return (0);
}
/*
* If the bootfs property value is dsobj, clear it.
*/
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
VERIFY(zap_remove(spa->spa_meta_objset,
spa->spa_pool_props_object,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
spa->spa_bootfs = 0;
}
}
/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid __maybe_unused = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t vdev_state;
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
int error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (SET_ERROR(error));
}
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_state = rvd->vdev_state;
spa_config_exit(spa, SCL_STATE, FTAG);
if (vdev_state != VDEV_STATE_HEALTHY)
return (SET_ERROR(ENXIO));
ASSERT3U(spa_guid(spa), !=, *newguid);
return (0);
}
static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
uint64_t oldguid;
vdev_t *rvd = spa->spa_root_vdev;
oldguid = spa_guid(spa);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
rvd->vdev_guid = *newguid;
rvd->vdev_guid_sum += (*newguid - oldguid);
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_STATE, FTAG);
spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
(u_longlong_t)oldguid, (u_longlong_t)*newguid);
}
/*
* Change the GUID for the pool. This is done so that we can later
* re-import a pool built from a clone of our own vdevs. We will modify
* the root vdev's guid, our own pool guid, and then mark all of our
* vdevs dirty. Note that we must make sure that all our vdevs are
* online when we do this, or else any vdevs that weren't present
* would be orphaned from our pool. We are also going to issue a
* sysevent to update any watchers.
*/
int
spa_change_guid(spa_t *spa)
{
int error;
uint64_t guid;
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
guid = spa_generate_guid(NULL);
error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
if (error == 0) {
spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
}
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* ==========================================================================
* SPA state manipulation (open/create/destroy/import/export)
* ==========================================================================
*/
static int
spa_error_entry_compare(const void *a, const void *b)
{
const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
int ret;
ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
sizeof (zbookmark_phys_t));
return (TREE_ISIGN(ret));
}
/*
* Utility function which retrieves copies of the current logs and
* re-initializes them in the process.
*/
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
}
static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
enum zti_modes mode = ztip->zti_mode;
uint_t value = ztip->zti_value;
uint_t count = ztip->zti_count;
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t cpus, flags = TASKQ_DYNAMIC;
boolean_t batch = B_FALSE;
switch (mode) {
case ZTI_MODE_FIXED:
ASSERT3U(value, >, 0);
break;
case ZTI_MODE_BATCH:
batch = B_TRUE;
flags |= TASKQ_THREADS_CPU_PCT;
value = MIN(zio_taskq_batch_pct, 100);
break;
case ZTI_MODE_SCALE:
flags |= TASKQ_THREADS_CPU_PCT;
/*
* We want more taskqs to reduce lock contention, but we want
* fewer for better request ordering and CPU utilization.
*/
cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
if (zio_taskq_batch_tpq > 0) {
count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
zio_taskq_batch_tpq);
} else {
/*
* Prefer 6 threads per taskq, but no more taskqs
* than threads in them on large systems. For 80%:
*
* taskq taskq total
* cpus taskqs percent threads threads
* ------- ------- ------- ------- -------
* 1 1 80% 1 1
* 2 1 80% 1 1
* 4 1 80% 3 3
* 8 2 40% 3 6
* 16 3 27% 4 12
* 32 5 16% 5 25
* 64 7 11% 7 49
* 128 10 8% 10 100
* 256 14 6% 15 210
*/
count = 1 + cpus / 6;
while (count * count > cpus)
count--;
}
/* Limit each taskq within 100% to not trigger assertion. */
count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
value = (zio_taskq_batch_pct + count / 2) / count;
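/*
 * A worked instance of the scaling arithmetic above, using the 16-CPU,
 * 80% row of the table (illustrative numbers only): cpus = 16 * 80 /
 * 100 = 12, count = 1 + 12 / 6 = 3 (3 * 3 <= 12, so the loop does not
 * decrement it), and value = (80 + 3 / 2) / 3 = 27, i.e. 3 taskqs at
 * 27% each, roughly 4 threads per taskq and 12 threads in total.
 */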
break;
case ZTI_MODE_NULL:
tqs->stqs_count = 0;
tqs->stqs_taskq = NULL;
return;
default:
panic("unrecognized mode for %s_%s taskq (%u:%u) in "
"spa_activate()",
zio_type_name[t], zio_taskq_types[q], mode, value);
break;
}
ASSERT3U(count, >, 0);
tqs->stqs_count = count;
tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
for (uint_t i = 0; i < count; i++) {
taskq_t *tq;
char name[32];
if (count > 1)
(void) snprintf(name, sizeof (name), "%s_%s_%u",
zio_type_name[t], zio_taskq_types[q], i);
else
(void) snprintf(name, sizeof (name), "%s_%s",
zio_type_name[t], zio_taskq_types[q]);
if (zio_taskq_sysdc && spa->spa_proc != &p0) {
if (batch)
flags |= TASKQ_DC_BATCH;
tq = taskq_create_sysdc(name, value, 50, INT_MAX,
spa->spa_proc, zio_taskq_basedc, flags);
} else {
pri_t pri = maxclsyspri;
/*
* The write issue taskq can be extremely CPU
* intensive. Run it at slightly less important
* priority than the other taskqs.
*
* Under Linux and FreeBSD this means incrementing
* the priority value as opposed to platforms like
* illumos where it should be decremented.
*
* On FreeBSD, if priorities divided by four (RQ_PPQ)
* are equal then a difference between them is
* insignificant.
*/
if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
#if defined(__linux__)
pri++;
#elif defined(__FreeBSD__)
pri += 4;
#else
#error "unknown OS"
#endif
}
tq = taskq_create_proc(name, value, pri, 50,
INT_MAX, spa->spa_proc, flags);
}
tqs->stqs_taskq[i] = tq;
}
}
static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
ASSERT3U(tqs->stqs_count, ==, 0);
return;
}
for (uint_t i = 0; i < tqs->stqs_count; i++) {
ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
taskq_destroy(tqs->stqs_taskq[i]);
}
kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
tqs->stqs_taskq = NULL;
}
/*
* Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
* Note that a type may have multiple discrete taskqs to avoid lock contention
* on the taskq itself. In that case we choose which taskq at random by using
* the low bits of gethrtime().
*/
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
taskq_dispatch_ent(tq, func, arg, flags, ent);
}
/*
* Same as spa_taskq_dispatch_ent() but block on the task until completion.
*/
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
taskqid_t id;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
id = taskq_dispatch(tq, func, arg, flags);
if (id)
taskq_wait_id(tq, id);
}
static void
spa_create_zio_taskqs(spa_t *spa)
{
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_init(spa, t, q);
}
}
}
/*
* Disabled until spa_thread() can be adapted for Linux.
*/
#undef HAVE_SPA_THREAD
#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
psetid_t zio_taskq_psrset_bind = PS_NONE;
callb_cpr_t cprinfo;
spa_t *spa = arg;
user_t *pu = PTOU(curproc);
CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
spa->spa_name);
ASSERT(curproc != &p0);
(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
"zpool-%s", spa->spa_name);
(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
/* bind this thread to the requested psrset */
if (zio_taskq_psrset_bind != PS_NONE) {
pool_lock();
mutex_enter(&cpu_lock);
mutex_enter(&pidlock);
mutex_enter(&curproc->p_lock);
if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
0, NULL, NULL) == 0) {
curthread->t_bind_pset = zio_taskq_psrset_bind;
} else {
cmn_err(CE_WARN,
"Couldn't bind process for zfs pool \"%s\" to "
"pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
}
mutex_exit(&curproc->p_lock);
mutex_exit(&pidlock);
mutex_exit(&cpu_lock);
pool_unlock();
}
if (zio_taskq_sysdc) {
sysdc_thread_enter(curthread, 100, 0);
}
spa->spa_proc = curproc;
spa->spa_did = curthread->t_did;
spa_create_zio_taskqs(spa);
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
spa->spa_proc_state = SPA_PROC_ACTIVE;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
while (spa->spa_proc_state == SPA_PROC_ACTIVE)
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
spa->spa_proc_state = SPA_PROC_GONE;
spa->spa_proc = &p0;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
mutex_enter(&curproc->p_lock);
lwp_exit();
}
#endif
/*
* Activate an uninitialized pool.
*/
static void
spa_activate(spa_t *spa, spa_mode_t mode)
{
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_mode = mode;
spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_embedded_log_class =
metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops);
/* Try to create a covering process */
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
ASSERT(spa->spa_proc == &p0);
spa->spa_did = 0;
#ifdef HAVE_SPA_THREAD
/* Only create a process if we're going to be around a while. */
if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
NULL, 0) == 0) {
spa->spa_proc_state = SPA_PROC_CREATED;
while (spa->spa_proc_state == SPA_PROC_CREATED) {
cv_wait(&spa->spa_proc_cv,
&spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
ASSERT(spa->spa_proc != &p0);
ASSERT(spa->spa_did != 0);
} else {
#ifdef _KERNEL
cmn_err(CE_WARN,
"Couldn't create process for zfs pool \"%s\"\n",
spa->spa_name);
#endif
}
}
#endif /* HAVE_SPA_THREAD */
mutex_exit(&spa->spa_proc_lock);
/* If we didn't create a process, we need to create our taskqs. */
if (spa->spa_proc == &p0) {
spa_create_zio_taskqs(spa);
}
for (size_t i = 0; i < TXG_SIZE; i++) {
spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
}
list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_config_dirty_node));
list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
offsetof(objset_t, os_evicting_node));
list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_state_dirty_node));
txg_list_create(&spa->spa_vdev_txg_list, spa,
offsetof(struct vdev, vdev_txg_node));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
spa_keystore_init(&spa->spa_keystore);
/*
* This taskq is used to perform zvol-minor-related tasks
* asynchronously. This has several advantages, including easy
* resolution of various deadlocks.
*
* The taskq must be single threaded to ensure tasks are always
* processed in the order in which they were dispatched.
*
* A taskq per pool allows one to keep the pools independent.
* This way if one pool is suspended, it will not impact another.
*
* The preferred location to dispatch a zvol minor task is a sync
* task. In this context, there is easy access to the spa_t and minimal
* error handling is required because the sync task must succeed.
*/
spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1, INT_MAX, 0);
/*
* Taskq dedicated to prefetcher threads: this is used to prevent the
* pool traverse code from monopolizing the global (and limited)
* system_taskq by inappropriately scheduling long running tasks on it.
*/
spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
/*
* The taskq to upgrade datasets in this pool. Currently used by
* feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
*/
spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
}
/*
* Opposite of spa_activate().
*/
static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
ASSERT(spa->spa_async_zio_root == NULL);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
if (spa->spa_zvol_taskq) {
taskq_destroy(spa->spa_zvol_taskq);
spa->spa_zvol_taskq = NULL;
}
if (spa->spa_prefetch_taskq) {
taskq_destroy(spa->spa_prefetch_taskq);
spa->spa_prefetch_taskq = NULL;
}
if (spa->spa_upgrade_taskq) {
taskq_destroy(spa->spa_upgrade_taskq);
spa->spa_upgrade_taskq = NULL;
}
txg_list_destroy(&spa->spa_vdev_txg_list);
list_destroy(&spa->spa_config_dirty_list);
list_destroy(&spa->spa_evicting_os_list);
list_destroy(&spa->spa_state_dirty_list);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_fini(spa, t, q);
}
}
for (size_t i = 0; i < TXG_SIZE; i++) {
ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
VERIFY0(zio_wait(spa->spa_txg_zio[i]));
spa->spa_txg_zio[i] = NULL;
}
metaslab_class_destroy(spa->spa_normal_class);
spa->spa_normal_class = NULL;
metaslab_class_destroy(spa->spa_log_class);
spa->spa_log_class = NULL;
metaslab_class_destroy(spa->spa_embedded_log_class);
spa->spa_embedded_log_class = NULL;
metaslab_class_destroy(spa->spa_special_class);
spa->spa_special_class = NULL;
metaslab_class_destroy(spa->spa_dedup_class);
spa->spa_dedup_class = NULL;
/*
* If this was part of an import or the open otherwise failed, we may
* still have errors left in the queues. Empty them just in case.
*/
spa_errlog_drain(spa);
avl_destroy(&spa->spa_errlist_scrub);
avl_destroy(&spa->spa_errlist_last);
spa_keystore_fini(&spa->spa_keystore);
spa->spa_state = POOL_STATE_UNINITIALIZED;
mutex_enter(&spa->spa_proc_lock);
if (spa->spa_proc_state != SPA_PROC_NONE) {
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
spa->spa_proc_state = SPA_PROC_DEACTIVATE;
cv_broadcast(&spa->spa_proc_cv);
while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
ASSERT(spa->spa_proc != &p0);
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
spa->spa_proc_state = SPA_PROC_NONE;
}
ASSERT(spa->spa_proc == &p0);
mutex_exit(&spa->spa_proc_lock);
/*
* We want to make sure spa_thread() has actually exited the ZFS
* module, so that the module can't be unloaded out from underneath
* it.
*/
if (spa->spa_did != 0) {
thread_join(spa->spa_did);
spa->spa_did = 0;
}
}
/*
* Verify a pool configuration, and construct the vdev tree appropriately. This
* will create all the necessary vdevs in the appropriate layout, with each vdev
* in the CLOSED state. This will prep the pool before open/creation/import.
* All vdev validation is done by the vdev_alloc() routine.
*/
int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
uint_t id, int atype)
{
nvlist_t **child;
uint_t children;
int error;
if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
return (error);
if ((*vdp)->vdev_ops->vdev_op_leaf)
return (0);
error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (error == ENOENT)
return (0);
if (error) {
vdev_free(*vdp);
*vdp = NULL;
return (SET_ERROR(EINVAL));
}
for (int c = 0; c < children; c++) {
vdev_t *vd;
if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
atype)) != 0) {
vdev_free(*vdp);
*vdp = NULL;
return (error);
}
}
ASSERT(*vdp != NULL);
return (0);
}
static boolean_t
spa_should_flush_logs_on_unload(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return (B_FALSE);
if (!spa_writeable(spa))
return (B_FALSE);
if (!spa->spa_sync_on)
return (B_FALSE);
if (spa_state(spa) != POOL_STATE_EXPORTED)
return (B_FALSE);
if (zfs_keep_log_spacemaps_at_export)
return (B_FALSE);
return (B_TRUE);
}
/*
* Opens a transaction that sets the flag instructing spa_sync to
* attempt to flush all the metaslabs for that txg.
*/
static void
spa_unload_log_sm_flush_all(spa_t *spa)
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
}
static void
spa_unload_log_sm_metadata(spa_t *spa)
{
void *cookie = NULL;
spa_log_sm_t *sls;
while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
&cookie)) != NULL) {
VERIFY0(sls->sls_mscount);
kmem_free(sls, sizeof (spa_log_sm_t));
}
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e != NULL; e = list_head(&spa->spa_log_summary)) {
VERIFY0(e->lse_mscount);
list_remove(&spa->spa_log_summary, e);
kmem_free(e, sizeof (log_summary_entry_t));
}
spa->spa_unflushed_stats.sus_nblocks = 0;
spa->spa_unflushed_stats.sus_memused = 0;
spa->spa_unflushed_stats.sus_blocklimit = 0;
}
static void
spa_destroy_aux_threads(spa_t *spa)
{
if (spa->spa_condense_zthr != NULL) {
zthr_destroy(spa->spa_condense_zthr);
spa->spa_condense_zthr = NULL;
}
if (spa->spa_checkpoint_discard_zthr != NULL) {
zthr_destroy(spa->spa_checkpoint_discard_zthr);
spa->spa_checkpoint_discard_zthr = NULL;
}
if (spa->spa_livelist_delete_zthr != NULL) {
zthr_destroy(spa->spa_livelist_delete_zthr);
spa->spa_livelist_delete_zthr = NULL;
}
if (spa->spa_livelist_condense_zthr != NULL) {
zthr_destroy(spa->spa_livelist_condense_zthr);
spa->spa_livelist_condense_zthr = NULL;
}
}
/*
* Opposite of spa_load().
*/
static void
spa_unload(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
spa_import_progress_remove(spa_guid(spa));
spa_load_note(spa, "UNLOADING");
spa_wake_waiters(spa);
/*
* If the log space map feature is enabled and the pool is getting
* exported (but not destroyed), we want to spend some time flushing
* as many metaslabs as we can in an attempt to destroy log space
* maps and save import time.
*/
if (spa_should_flush_logs_on_unload(spa))
spa_unload_log_sm_flush_all(spa);
/*
* Stop async tasks.
*/
spa_async_suspend(spa);
if (spa->spa_root_vdev) {
vdev_t *root_vdev = spa->spa_root_vdev;
vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
}
/*
* Stop syncing.
*/
if (spa->spa_sync_on) {
txg_sync_stop(spa->spa_dsl_pool);
spa->spa_sync_on = B_FALSE;
}
/*
* This ensures that there is no async metaslab prefetching
* while we attempt to unload the spa.
*/
if (spa->spa_root_vdev != NULL) {
for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
vdev_t *vc = spa->spa_root_vdev->vdev_child[c];
if (vc->vdev_mg != NULL)
taskq_wait(vc->vdev_mg->mg_taskq);
}
}
if (spa->spa_mmp.mmp_thread)
mmp_thread_stop(spa);
/*
* Wait for any outstanding async I/O to complete.
*/
if (spa->spa_async_zio_root != NULL) {
for (int i = 0; i < max_ncpus; i++)
(void) zio_wait(spa->spa_async_zio_root[i]);
kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
spa->spa_async_zio_root = NULL;
}
if (spa->spa_vdev_removal != NULL) {
spa_vdev_removal_destroy(spa->spa_vdev_removal);
spa->spa_vdev_removal = NULL;
}
spa_destroy_aux_threads(spa);
spa_condense_fini(spa);
bpobj_close(&spa->spa_deferred_bpobj);
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
/*
* Close all vdevs.
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
ASSERT(spa->spa_root_vdev == NULL);
/*
* Close the dsl pool.
*/
if (spa->spa_dsl_pool) {
dsl_pool_close(spa->spa_dsl_pool);
spa->spa_dsl_pool = NULL;
spa->spa_meta_objset = NULL;
}
ddt_unload(spa);
spa_unload_log_sm_metadata(spa);
/*
* Drop and purge level 2 cache
*/
spa_l2cache_drop(spa);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
vdev_free(spa->spa_spares.sav_vdevs[i]);
if (spa->spa_spares.sav_vdevs) {
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
spa->spa_spares.sav_vdevs = NULL;
}
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
}
spa->spa_spares.sav_count = 0;
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
vdev_free(spa->spa_l2cache.sav_vdevs[i]);
}
if (spa->spa_l2cache.sav_vdevs) {
kmem_free(spa->spa_l2cache.sav_vdevs,
spa->spa_l2cache.sav_count * sizeof (void *));
spa->spa_l2cache.sav_vdevs = NULL;
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
}
spa->spa_l2cache.sav_count = 0;
spa->spa_async_suspended = 0;
spa->spa_indirect_vdevs_loaded = B_FALSE;
if (spa->spa_comment != NULL) {
spa_strfree(spa->spa_comment);
spa->spa_comment = NULL;
}
if (spa->spa_compatibility != NULL) {
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = NULL;
}
spa_config_exit(spa, SCL_ALL, spa);
}
/*
* Load (or re-load) the current list of vdevs describing the active spares for
* this pool. When this is called, we have some form of basic information in
* 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
*/
void
spa_load_spares(spa_t *spa)
{
nvlist_t **spares;
uint_t nspares;
int i;
vdev_t *vd, *tvd;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As spare vdevs are shared among open pools, we skip loading
* them when we load the checkpointed state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* First, close and free any existing spare vdevs.
*/
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
/* Undo the call to spa_activate() below */
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL && tvd->vdev_isspare)
spa_spare_remove(tvd);
vdev_close(vd);
vdev_free(vd);
}
if (spa->spa_spares.sav_vdevs)
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
if (spa->spa_spares.sav_config == NULL)
nspares = 0;
else
VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
spa->spa_spares.sav_count = (int)nspares;
spa->spa_spares.sav_vdevs = NULL;
if (nspares == 0)
return;
/*
* Construct the array of vdevs, opening them to get status in the
* process. For each spare, there are potentially two different vdev_t
* structures associated with it: one in the list of spares (used only
* for basic validation purposes) and one in the active vdev
* configuration (if it's spared in). During this phase we open and
* validate each vdev on the spare list. If the vdev also exists in the
* active configuration, then we also mark this vdev as an active spare.
*/
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
VDEV_ALLOC_SPARE) == 0);
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL) {
if (!tvd->vdev_isspare)
spa_spare_add(tvd);
/*
* We only mark the spare active if we were successfully
* able to load the vdev. Otherwise, importing a pool
* with a bad active spare would result in strange
* behavior, because multiple pools would think the spare
* is actively in use.
*
* There is a vulnerability here to an equally bizarre
* circumstance, where a dead active spare is later
* brought back to life (onlined or otherwise). Given
* the rarity of this scenario, and the extra complexity
* it adds, we ignore the possibility.
*/
if (!vdev_is_dead(tvd))
spa_spare_activate(tvd);
}
vd->vdev_top = vd;
vd->vdev_aux = &spa->spa_spares;
if (vdev_open(vd) != 0)
continue;
if (vdev_validate_aux(vd) == 0)
spa_spare_add(vd);
}
/*
* Recompute the stashed list of spares, with status information
* this time.
*/
VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
DATA_TYPE_NVLIST_ARRAY) == 0);
spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++)
spares[i] = vdev_config_generate(spa,
spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
for (i = 0; i < spa->spa_spares.sav_count; i++)
nvlist_free(spares[i]);
kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
/*
* Load (or re-load) the current list of vdevs describing the active l2cache for
* this pool. When this is called, we have some form of basic information in
* 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
* Devices which are already active have their details maintained, and are
* not re-opened.
*/
void
spa_load_l2cache(spa_t *spa)
{
nvlist_t **l2cache = NULL;
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid;
vdev_t *vd, **oldvdevs, **newvdevs;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As L2 caches are part of the ARC which is shared among open
* pools, we skip loading them when we load the checkpointed
* state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
oldvdevs = sav->sav_vdevs;
oldnvdevs = sav->sav_count;
sav->sav_vdevs = NULL;
sav->sav_count = 0;
if (sav->sav_config == NULL) {
nl2cache = 0;
newvdevs = NULL;
goto out;
}
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
/*
* Process new nvlist of vdevs.
*/
for (i = 0; i < nl2cache; i++) {
VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
&guid) == 0);
newvdevs[i] = NULL;
for (j = 0; j < oldnvdevs; j++) {
vd = oldvdevs[j];
if (vd != NULL && guid == vd->vdev_guid) {
/*
* Retain previous vdev for add/remove ops.
*/
newvdevs[i] = vd;
oldvdevs[j] = NULL;
break;
}
}
if (newvdevs[i] == NULL) {
/*
* Create new vdev
*/
VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
VDEV_ALLOC_L2CACHE) == 0);
ASSERT(vd != NULL);
newvdevs[i] = vd;
/*
* Commit this vdev as an l2cache device,
* even if it fails to open.
*/
spa_l2cache_add(vd);
vd->vdev_top = vd;
vd->vdev_aux = sav;
spa_l2cache_activate(vd);
if (vdev_open(vd) != 0)
continue;
(void) vdev_validate_aux(vd);
if (!vdev_is_dead(vd))
l2arc_add_vdev(spa, vd);
/*
* Upon cache device addition to a pool or pool
* creation with a cache device, or if the header
* of the device is invalid, we issue an async
* TRIM command for the whole device, which will
* execute if l2arc_trim_ahead > 0.
*/
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
}
sav->sav_vdevs = newvdevs;
sav->sav_count = (int)nl2cache;
/*
* Recompute the stashed list of l2cache devices, with status
* information this time.
*/
VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
DATA_TYPE_NVLIST_ARRAY) == 0);
if (sav->sav_count > 0)
l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
l2cache[i] = vdev_config_generate(spa,
sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
VERIFY(nvlist_add_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
/*
* Purge vdevs that were dropped
*/
for (i = 0; i < oldnvdevs; i++) {
uint64_t pool;
vd = oldvdevs[i];
if (vd != NULL) {
ASSERT(vd->vdev_isl2cache);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
vdev_clear_stats(vd);
vdev_free(vd);
}
}
if (oldvdevs)
kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
for (i = 0; i < sav->sav_count; i++)
nvlist_free(l2cache[i]);
if (sav->sav_count)
kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
dmu_buf_t *db;
char *packed = NULL;
size_t nvsize = 0;
int error;
*value = NULL;
error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
if (error)
return (error);
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
packed = vmem_alloc(nvsize, KM_SLEEP);
error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
DMU_READ_PREFETCH);
if (error == 0)
error = nvlist_unpack(packed, nvsize, value, 0);
vmem_free(packed, nvsize);
return (error);
}
/*
* Concrete top-level vdevs that are not missing and are not logs. At every
* spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
*/
static uint64_t
spa_healthy_core_tvds(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t tvds = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog)
continue;
if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
tvds++;
}
return (tvds);
}
/*
* Checks to see if the given vdev could not be opened, in which case we post a
* sysevent to notify the autoreplace code that the device has been removed.
*/
static void
spa_check_removed(vdev_t *vd)
{
for (uint64_t c = 0; c < vd->vdev_children; c++)
spa_check_removed(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
vdev_is_concrete(vd)) {
zfs_post_autoreplace(vd->vdev_spa, vd);
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
}
}
static int
spa_check_for_missing_logs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're doing a normal import, then build up any additional
* diagnostic information about missing log devices.
* We'll pass this up to the user for further processing.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
nvlist_t **child, *nv;
uint64_t idx = 0;
child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
/*
* We consider a device as missing only if it failed
* to open (i.e. offline or faulted is not considered
* missing).
*/
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
child[idx++] = vdev_config_generate(spa, tvd,
B_FALSE, VDEV_CONFIG_MISSING);
}
}
if (idx > 0) {
fnvlist_add_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, child, idx);
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_MISSING_DEVICES, nv);
for (uint64_t i = 0; i < idx; i++)
nvlist_free(child[i]);
}
nvlist_free(nv);
kmem_free(child, rvd->vdev_children * sizeof (char **));
if (idx > 0) {
spa_load_failed(spa, "some log devices are missing");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
} else {
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
spa_set_log_state(spa, SPA_LOG_CLEAR);
spa_load_note(spa, "some log devices are "
"missing, ZIL is dropped.");
vdev_dbgmsg_print_tree(rvd, 2);
break;
}
}
}
return (0);
}
/*
* Check for missing log devices
*/
static boolean_t
spa_check_logs(spa_t *spa)
{
boolean_t rv = B_FALSE;
dsl_pool_t *dp = spa_get_dsl(spa);
switch (spa->spa_log_state) {
default:
break;
case SPA_LOG_MISSING:
/* need to recheck in case slog has been restored */
case SPA_LOG_UNKNOWN:
rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
if (rv)
spa_set_log_state(spa, SPA_LOG_MISSING);
break;
}
return (rv);
}
/*
* Passivate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static boolean_t
spa_passivate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
boolean_t slog_found = B_FALSE;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(tvd->vdev_mg);
slog_found = B_TRUE;
}
}
return (slog_found);
}
/*
* Activate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static void
spa_activate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_activate(tvd->vdev_mg);
}
}
}
int
spa_reset_logs(spa_t *spa)
{
int error;
error = dmu_objset_find(spa_name(spa), zil_reset,
NULL, DS_FIND_CHILDREN);
if (error == 0) {
/*
* We successfully offlined the log device, sync out the
* current txg so that the "stubby" block can be removed
* by zil_sync().
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
}
return (error);
}
static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
for (int i = 0; i < sav->sav_count; i++)
spa_check_removed(sav->sav_vdevs[i]);
}
void
spa_claim_notify(zio_t *zio)
{
spa_t *spa = zio->io_spa;
if (zio->io_error)
return;
mutex_enter(&spa->spa_props_lock); /* any mutex will do */
if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
spa->spa_claim_max_txg = zio->io_bp->blk_birth;
mutex_exit(&spa->spa_props_lock);
}
typedef struct spa_load_error {
uint64_t sle_meta_count;
uint64_t sle_data_count;
} spa_load_error_t;
static void
spa_load_verify_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
spa_load_error_t *sle = zio->io_private;
dmu_object_type_t type = BP_GET_TYPE(bp);
int error = zio->io_error;
spa_t *spa = zio->io_spa;
abd_free(zio->io_abd);
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
type != DMU_OT_INTENT_LOG)
atomic_inc_64(&sle->sle_meta_count);
else
atomic_inc_64(&sle->sle_data_count);
}
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
/*
* Maximum number of inflight bytes is the arc target size shifted right
* by spa_load_verify_shift. By default, we set it to 1/16th of the arc.
*/
int spa_load_verify_shift = 4;
int spa_load_verify_metadata = B_TRUE;
int spa_load_verify_data = B_TRUE;
/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (0);
/*
* Note: normally this routine will not be called if
* spa_load_verify_metadata is not set. However, it may be useful
* to manually set the flag after the traversal has begun.
*/
if (!spa_load_verify_metadata)
return (0);
if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
return (0);
uint64_t maxinflight_bytes =
arc_target_bytes() >> spa_load_verify_shift;
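/*
 * Illustration of the throttle above: with the shift of 4 set earlier
 * and, say, a 64 GiB ARC target, at most 4 GiB of speculative
 * verification reads are kept in flight before this callback blocks
 * on spa_scrub_io_cv below (the 64 GiB figure is only an example).
 */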
zio_t *rio = arg;
size_t size = BP_GET_PSIZE(bp);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes >= maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
return (0);
}
/* ARGSUSED */
static int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
static int
spa_load_verify(spa_t *spa)
{
zio_t *rio;
spa_load_error_t sle = { 0 };
zpool_load_policy_t policy;
boolean_t verify_ok = B_FALSE;
int error = 0;
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_rewind & ZPOOL_NEVER_REWIND)
return (0);
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
error = dmu_objset_find_dp(spa->spa_dsl_pool,
spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
DS_FIND_CHILDREN);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
if (error != 0)
return (error);
rio = zio_root(spa, NULL, &sle,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
if (spa_load_verify_metadata) {
if (spa->spa_extreme_rewind) {
spa_load_note(spa, "performing a complete scan of the "
"pool since extreme rewind is on. This may take "
"a very long time.\n (spa_load_verify_data=%u, "
"spa_load_verify_metadata=%u)",
spa_load_verify_data, spa_load_verify_metadata);
}
error = traverse_pool(spa, spa->spa_verify_min_txg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
}
(void) zio_wait(rio);
ASSERT0(spa->spa_load_verify_bytes);
spa->spa_load_meta_errors = sle.sle_meta_count;
spa->spa_load_data_errors = sle.sle_data_count;
if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
spa_load_note(spa, "spa_load_verify found %llu metadata errors "
"and %llu data errors", (u_longlong_t)sle.sle_meta_count,
(u_longlong_t)sle.sle_data_count);
}
if (spa_load_verify_dryrun ||
(!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
sle.sle_data_count <= policy.zlp_maxdata)) {
int64_t loss = 0;
verify_ok = B_TRUE;
spa->spa_load_txg = spa->spa_uberblock.ub_txg;
spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
VERIFY(nvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
VERIFY(nvlist_add_int64(spa->spa_load_info,
ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
VERIFY(nvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
} else {
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
}
if (spa_load_verify_dryrun)
return (0);
if (error) {
if (error != ENXIO && error != EIO)
error = SET_ERROR(EIO);
return (error);
}
return (verify_ok ? 0 : EIO);
}
/*
* Find a value in the pool props object.
*/
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}
/*
* Find a value in the pool directory object.
*/
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
{
int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
name, sizeof (uint64_t), 1, val);
if (error != 0 && (error != ENOENT || log_enoent)) {
spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
"[error=%d]", name, error);
}
return (error);
}
static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
return (SET_ERROR(err));
}
boolean_t
spa_livelist_delete_check(spa_t *spa)
{
return (spa->spa_livelists_to_delete != 0);
}
/* ARGSUSED */
static boolean_t
spa_livelist_delete_cb_check(void *arg, zthr_t *z)
{
spa_t *spa = arg;
return (spa_livelist_delete_check(spa));
}
static int
delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
spa_t *spa = arg;
zio_free(spa, tx->tx_txg, bp);
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
return (0);
}
static int
dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
{
int err;
zap_cursor_t zc;
zap_attribute_t za;
zap_cursor_init(&zc, os, zap_obj);
err = zap_cursor_retrieve(&zc, &za);
zap_cursor_fini(&zc);
if (err == 0)
*llp = za.za_first_integer;
return (err);
}
/*
* Components of livelist deletion that must be performed in syncing
* context: freeing block pointers and updating the pool-wide data
* structures to indicate how much work is left to do
*/
typedef struct sublist_delete_arg {
spa_t *spa;
dsl_deadlist_t *ll;
uint64_t key;
bplist_t *to_free;
} sublist_delete_arg_t;
static void
sublist_delete_sync(void *arg, dmu_tx_t *tx)
{
sublist_delete_arg_t *sda = arg;
spa_t *spa = sda->spa;
dsl_deadlist_t *ll = sda->ll;
uint64_t key = sda->key;
bplist_t *to_free = sda->to_free;
bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
dsl_deadlist_remove_entry(ll, key, tx);
}
typedef struct livelist_delete_arg {
spa_t *spa;
uint64_t ll_obj;
uint64_t zap_obj;
} livelist_delete_arg_t;
static void
livelist_delete_sync(void *arg, dmu_tx_t *tx)
{
livelist_delete_arg_t *lda = arg;
spa_t *spa = lda->spa;
uint64_t ll_obj = lda->ll_obj;
uint64_t zap_obj = lda->zap_obj;
objset_t *mos = spa->spa_meta_objset;
uint64_t count;
/* free the livelist and decrement the feature count */
VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
dsl_deadlist_free(mos, ll_obj, tx);
spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
VERIFY0(zap_count(mos, zap_obj, &count));
if (count == 0) {
/* no more livelists to delete */
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, tx));
VERIFY0(zap_destroy(mos, zap_obj, tx));
spa->spa_livelists_to_delete = 0;
spa_notify_waiters(spa);
}
}
/*
* Load in the value for the livelist to be removed and open it. Then,
* load its first sublist and determine which block pointers should actually
* be freed. Then, call a synctask which performs the actual frees and updates
* the pool-wide livelist data.
*/
/* ARGSUSED */
static void
spa_livelist_delete_cb(void *arg, zthr_t *z)
{
spa_t *spa = arg;
uint64_t ll_obj = 0, count;
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj = spa->spa_livelists_to_delete;
/*
* Determine the next livelist to delete. This function should only
* be called if there is at least one deleted clone.
*/
VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
VERIFY0(zap_count(mos, ll_obj, &count));
if (count > 0) {
dsl_deadlist_t *ll;
dsl_deadlist_entry_t *dle;
bplist_t to_free;
ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
dsl_deadlist_open(ll, mos, ll_obj);
dle = dsl_deadlist_first(ll);
ASSERT3P(dle, !=, NULL);
bplist_create(&to_free);
int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
z, NULL);
if (err == 0) {
sublist_delete_arg_t sync_arg = {
.spa = spa,
.ll = ll,
.key = dle->dle_mintxg,
.to_free = &to_free
};
zfs_dbgmsg("deleting sublist (id %llu) from"
" livelist %llu, %lld remaining",
(u_longlong_t)dle->dle_bpobj.bpo_object,
(u_longlong_t)ll_obj, (longlong_t)count - 1);
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
sublist_delete_sync, &sync_arg, 0,
ZFS_SPACE_CHECK_DESTROY));
} else {
VERIFY3U(err, ==, EINTR);
}
bplist_clear(&to_free);
bplist_destroy(&to_free);
dsl_deadlist_close(ll);
kmem_free(ll, sizeof (dsl_deadlist_t));
} else {
livelist_delete_arg_t sync_arg = {
.spa = spa,
.ll_obj = ll_obj,
.zap_obj = zap_obj
};
zfs_dbgmsg("deletion of livelist %llu completed",
(u_longlong_t)ll_obj);
VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
&sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
}
}
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
spa->spa_livelist_delete_zthr =
zthr_create("z_livelist_destroy",
spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa);
}
typedef struct livelist_new_arg {
bplist_t *allocs;
bplist_t *frees;
} livelist_new_arg_t;
static int
livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(tx == NULL);
livelist_new_arg_t *lna = arg;
if (bp_freed) {
bplist_append(lna->frees, bp);
} else {
bplist_append(lna->allocs, bp);
zfs_livelist_condense_new_alloc++;
}
return (0);
}
typedef struct livelist_condense_arg {
spa_t *spa;
bplist_t to_keep;
uint64_t first_size;
uint64_t next_size;
} livelist_condense_arg_t;
static void
spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
{
livelist_condense_arg_t *lca = arg;
spa_t *spa = lca->spa;
bplist_t new_frees;
dsl_dataset_t *ds = spa->spa_to_condense.ds;
/* Have we been cancelled? */
if (spa->spa_to_condense.cancelled) {
zfs_livelist_condense_sync_cancel++;
goto out;
}
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
/*
* It's possible that the livelist was changed while the zthr was
* running. Therefore, we need to check for new blkptrs in the two
* entries being condensed and continue to track them in the livelist.
* Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
* it's possible that the newly added blkptrs are FREEs or ALLOCs so
* we need to sort them into two different bplists.
*/
uint64_t first_obj = first->dle_bpobj.bpo_object;
uint64_t next_obj = next->dle_bpobj.bpo_object;
uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
bplist_create(&new_frees);
livelist_new_arg_t new_bps = {
.allocs = &lca->to_keep,
.frees = &new_frees,
};
if (cur_first_size > lca->first_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->first_size));
}
if (cur_next_size > lca->next_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->next_size));
}
dsl_deadlist_clear_entry(first, ll, tx);
ASSERT(bpobj_is_empty(&first->dle_bpobj));
dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
bplist_destroy(&new_frees);
char dsname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
"(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
"(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
(u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
(u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
(u_longlong_t)cur_next_size,
(u_longlong_t)first->dle_bpobj.bpo_object,
(u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
out:
dmu_buf_rele(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
spa->spa_to_condense.syncing = B_FALSE;
}
static void
spa_livelist_condense_cb(void *arg, zthr_t *t)
{
while (zfs_livelist_condense_zthr_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
spa_t *spa = arg;
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
uint64_t first_size, next_size;
livelist_condense_arg_t *lca =
kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
bplist_create(&lca->to_keep);
/*
* Process the livelists (matching FREEs and ALLOCs) in open context
* so we have minimal work in syncing context to condense.
*
* We save bpobj sizes (first_size and next_size) to use later in
* syncing context to determine if entries were added to these sublists
* while in open context. This is possible because the clone is still
* active and open for normal writes and we want to make sure the new,
* unprocessed blockpointers are inserted into the livelist normally.
*
* Note that dsl_process_sub_livelist() both stores the number of
* block pointers and iterates over them while the bpobj's lock is
* held, so the sizes returned to us are consistent with what was
* actually processed.
*/
int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
&first_size);
if (err == 0)
err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
t, &next_size);
if (err == 0) {
while (zfs_livelist_condense_sync_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_mark_netfree(tx);
dmu_tx_hold_space(tx, 1);
err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
if (err == 0) {
/*
* Prevent the condense zthr from restarting before
* the synctask completes.
*/
spa->spa_to_condense.syncing = B_TRUE;
lca->spa = spa;
lca->first_size = first_size;
lca->next_size = next_size;
dsl_sync_task_nowait(spa_get_dsl(spa),
spa_livelist_condense_sync, lca, tx);
dmu_tx_commit(tx);
return;
}
}
/*
* Condensing cannot continue: either it was externally stopped or
* we were unable to assign to a tx because the pool has run out of
* space. In the second case, we'll just end up trying to condense
* again in a later txg.
*/
ASSERT(err != 0);
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
if (err == EINTR)
zfs_livelist_condense_zthr_cancel++;
}
/* ARGSUSED */
/*
* Check that there is something to condense but that a condense is not
* already in progress and that condensing has not been cancelled.
*/
static boolean_t
spa_livelist_condense_cb_check(void *arg, zthr_t *z)
{
spa_t *spa = arg;
if ((spa->spa_to_condense.ds != NULL) &&
(spa->spa_to_condense.syncing == B_FALSE) &&
(spa->spa_to_condense.cancelled == B_FALSE)) {
return (B_TRUE);
}
return (B_FALSE);
}
static void
spa_start_livelist_condensing_thread(spa_t *spa)
{
spa->spa_to_condense.ds = NULL;
spa->spa_to_condense.first = NULL;
spa->spa_to_condense.next = NULL;
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
spa->spa_livelist_condense_zthr =
zthr_create("z_livelist_condense",
spa_livelist_condense_cb_check,
spa_livelist_condense_cb, spa);
}
static void
spa_spawn_aux_threads(spa_t *spa)
{
ASSERT(spa_writeable(spa));
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_start_indirect_condensing_thread(spa);
spa_start_livelist_destroy_thread(spa);
spa_start_livelist_condensing_thread(spa);
ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
spa->spa_checkpoint_discard_zthr =
zthr_create("z_checkpoint_discard",
spa_checkpoint_discard_thread_check,
spa_checkpoint_discard_thread, spa);
}
/*
* Fix up config after a partly-completed split. This is done with the
* ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
* pool have that entry in their config, but only the splitting one contains
* a list of all the guids of the vdevs that are being split off.
*
* This function determines what to do with that list: either rejoin
* all the disks to the pool, or complete the splitting process. To attempt
* the rejoin, each disk that is offlined is marked online again, and
* we do a reopen() call. If the vdev label for every disk that was
* marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
* then we call vdev_split() on each disk, and complete the split.
*
* Otherwise we leave the config alone, with all the vdevs in place in
* the original pool.
*/
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
uint_t extracted;
uint64_t *glist;
uint_t i, gcount;
nvlist_t *nvl;
vdev_t **vd;
boolean_t attempt_reopen;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
return;
/* check that the config is complete */
if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
&glist, &gcount) != 0)
return;
vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
/* attempt to online all the vdevs & validate */
attempt_reopen = B_TRUE;
for (i = 0; i < gcount; i++) {
if (glist[i] == 0) /* vdev is hole */
continue;
vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
if (vd[i] == NULL) {
/*
* Don't bother attempting to reopen the disks;
* just do the split.
*/
attempt_reopen = B_FALSE;
} else {
/* attempt to re-online it */
vd[i]->vdev_offline = B_FALSE;
}
}
if (attempt_reopen) {
vdev_reopen(spa->spa_root_vdev);
/* check each device to see what state it's in */
for (extracted = 0, i = 0; i < gcount; i++) {
if (vd[i] != NULL &&
vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
break;
++extracted;
}
}
/*
* If every disk has been moved to the new pool, or if we never
* even attempted to look at them, then we split them off for
* good.
*/
if (!attempt_reopen || gcount == extracted) {
for (i = 0; i < gcount; i++)
if (vd[i] != NULL)
vdev_split(vd[i]);
vdev_reopen(spa->spa_root_vdev);
}
kmem_free(vd, gcount * sizeof (vdev_t *));
}
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
{
char *ereport = FM_EREPORT_ZFS_POOL;
int error;
spa->spa_load_state = state;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
gethrestime(&spa->spa_loaded_ts);
error = spa_load_impl(spa, type, &ereport);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
spa->spa_loaded_ts.tv_sec = 0;
spa->spa_loaded_ts.tv_nsec = 0;
}
if (error != EBADF) {
(void) zfs_ereport_post(ereport, spa,
NULL, NULL, NULL, 0);
}
}
spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
spa->spa_ena = 0;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
return (error);
}
#ifdef ZFS_DEBUG
/*
* Count the number of per-vdev ZAPs associated with all of the vdevs in the
* vdev tree rooted in the given vd, and ensure that each ZAP is present in the
* spa's per-vdev ZAP list.
*/
static uint64_t
vdev_count_verify_zaps(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
uint64_t total = 0;
if (vd->vdev_top_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_top_zap));
}
if (vd->vdev_leaf_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
total += vdev_count_verify_zaps(vd->vdev_child[i]);
}
return (total);
}
#endif
/*
* Determine whether the activity check is required.
*/
static boolean_t
spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
nvlist_t *config)
{
uint64_t state = 0;
uint64_t hostid = 0;
uint64_t tryconfig_txg = 0;
uint64_t tryconfig_timestamp = 0;
uint16_t tryconfig_mmp_seq = 0;
nvlist_t *nvinfo;
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
&tryconfig_txg);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
&tryconfig_timestamp);
(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
&tryconfig_mmp_seq);
}
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
/*
* Disable the MMP activity check. This is used by zdb, which
* is intended to be used on potentially active pools.
*/
if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
return (B_FALSE);
/*
* Skip the activity check when the MMP feature is disabled.
*/
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
return (B_FALSE);
/*
* If the tryconfig_ values are nonzero, they are the results of an
* earlier tryimport. If they all match the uberblock we just found,
* then the pool has not changed and we return false so we do not test
* a second time.
*/
if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
tryconfig_mmp_seq && tryconfig_mmp_seq ==
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
return (B_FALSE);
/*
* Allow the activity check to be skipped when importing the pool
* on the same host that last imported it. Since the hostid from the
* configuration may be stale, use the one read from the label.
*/
if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
if (hostid == spa_get_hostid(spa))
return (B_FALSE);
/*
* Skip the activity test when the pool was cleanly exported.
*/
if (state != POOL_STATE_ACTIVE)
return (B_FALSE);
return (B_TRUE);
}
/*
* Nanoseconds the activity check must watch for changes on-disk.
*/
static uint64_t
spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
{
uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
uint64_t multihost_interval = MSEC2NSEC(
MMP_INTERVAL_OK(zfs_multihost_interval));
uint64_t import_delay = MAX(NANOSEC, import_intervals *
multihost_interval);
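/*
 * For a rough sense of scale (illustrative values): with a 1000 ms
 * multihost interval and 20 import intervals, this baseline
 * import_delay works out to 20 seconds; the cases below may raise it
 * based on what the last importer recorded in the uberblock.
 */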
/*
* Local tunables determine a minimum duration except for the case
* where we know when the remote host will suspend the pool if MMP
* writes do not land.
*
* See Big Theory comment at the top of mmp.c for the reasoning behind
* these cases and times.
*/
ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) > 0) {
/* MMP on remote host will suspend pool after failed writes */
import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
MMP_IMPORT_SAFETY_FACTOR / 100;
zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
"mmp_fails=%llu ub_mmp mmp_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_FAIL_INT(ub),
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)import_intervals);
} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) == 0) {
/* MMP on remote host will never suspend pool */
import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
"mmp_interval=%llu ub_mmp_delay=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals);
} else if (MMP_VALID(ub)) {
/*
* zfs-0.7 compatibility case
*/
import_delay = MAX(import_delay, (multihost_interval +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
"import_intervals=%llu leaves=%u",
(u_longlong_t)import_delay,
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals,
vdev_count_leaves(spa));
} else {
/* Using local tunings is the only reasonable option */
zfs_dbgmsg("pool last imported on non-MMP aware "
"host using import_delay=%llu multihost_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)multihost_interval,
(u_longlong_t)import_intervals);
}
return (import_delay);
}
/*
* Perform the import activity check. If the user canceled the import or
* we detected activity then fail.
*/
static int
spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
{
uint64_t txg = ub->ub_txg;
uint64_t timestamp = ub->ub_timestamp;
uint64_t mmp_config = ub->ub_mmp_config;
uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
uint64_t import_delay;
hrtime_t import_expire;
nvlist_t *mmp_label = NULL;
vdev_t *rvd = spa->spa_root_vdev;
kcondvar_t cv;
kmutex_t mtx;
int error = 0;
cv_init(&cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_enter(&mtx);
/*
* If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
* during the earlier tryimport. If the txg recorded there is 0 then
* the pool is known to be active on another host.
*
* Otherwise, the pool might be in use on another host. Check for
* changes in the uberblocks on disk if necessary.
*/
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
vdev_uberblock_load(rvd, ub, &mmp_label);
error = SET_ERROR(EREMOTEIO);
goto out;
}
}
import_delay = spa_activity_check_duration(spa, ub);
/* Add a small random factor in case of simultaneous imports (0-25%) */
import_delay += import_delay * random_in_range(250) / 1000;
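/*
 * Assuming random_in_range(250) returns a value in [0, 250), the added
 * jitter is between 0% and 24.9% of import_delay, which is where the
 * "0-25%" figure above comes from.
 */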
import_expire = gethrtime() + import_delay;
while (gethrtime() < import_expire) {
(void) spa_import_progress_set_mmp_check(spa_guid(spa),
NSEC2SEC(import_expire - gethrtime()));
vdev_uberblock_load(rvd, ub, &mmp_label);
if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
zfs_dbgmsg("multihost activity detected "
"txg %llu ub_txg %llu "
"timestamp %llu ub_timestamp %llu "
"mmp_config %#llx ub_mmp_config %#llx",
(u_longlong_t)txg, (u_longlong_t)ub->ub_txg,
(u_longlong_t)timestamp,
(u_longlong_t)ub->ub_timestamp,
(u_longlong_t)mmp_config,
(u_longlong_t)ub->ub_mmp_config);
error = SET_ERROR(EREMOTEIO);
break;
}
if (mmp_label) {
nvlist_free(mmp_label);
mmp_label = NULL;
}
error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
if (error != -1) {
error = SET_ERROR(EINTR);
break;
}
error = 0;
}
out:
mutex_exit(&mtx);
mutex_destroy(&mtx);
cv_destroy(&cv);
/*
* If the pool is determined to be active, store the status in the
* spa->spa_load_info nvlist. If the remote hostname or hostid are
* available from the configuration read from disk, store them as well.
* This allows 'zpool import' to generate a more useful message.
*
* ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
* ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
* ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
*/
if (error == EREMOTEIO) {
char *hostname = "<unknown>";
uint64_t hostid = 0;
if (mmp_label) {
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
hostname = fnvlist_lookup_string(mmp_label,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
}
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
hostid = fnvlist_lookup_uint64(mmp_label,
ZPOOL_CONFIG_HOSTID);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTID, hostid);
}
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, 0);
error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
}
if (mmp_label)
nvlist_free(mmp_label);
return (error);
}
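/*
 * Verify that the hostid stored in the MOS config matches the hostid of the
 * system performing the open. If they differ, fail the load with EBADF.
 */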
static int
spa_verify_host(spa_t *spa, nvlist_t *mos_config)
{
uint64_t hostid;
char *hostname;
uint64_t myhostid = 0;
if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
hostname = fnvlist_lookup_string(mos_config,
ZPOOL_CONFIG_HOSTNAME);
myhostid = zone_get_hostid(NULL);
if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by "
"another system (host: %s hostid: 0x%llx). "
"See: https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-EY",
spa_name(spa), hostname, (u_longlong_t)hostid);
spa_load_failed(spa, "hostid verification failed: pool "
"last accessed by host: %s (hostid: 0x%llx)",
hostname, (u_longlong_t)hostid);
return (SET_ERROR(EBADF));
}
}
return (0);
}
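/*
 * Parse the provided config into an untrusted in-core vdev tree and record
 * pool-wide details (guid, comment, compatibility, split info) in the spa.
 */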
static int
spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
{
int error = 0;
nvlist_t *nvtree, *nvl, *config = spa->spa_config;
int parse;
vdev_t *rvd;
uint64_t pool_guid;
char *comment;
char *compatibility;
/*
* Versioning wasn't explicitly added to the label until later, so if
* it's not present, treat it as the initial version.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&spa->spa_ubsync.ub_version) != 0)
spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_POOL_GUID);
return (SET_ERROR(EINVAL));
}
/*
* If we are doing an import, ensure that the pool is not already
* imported by checking if its pool guid already exists in the
* spa namespace.
*
* The only case in which we allow an already imported pool to be
* imported again is when the pool is checkpointed and we want to
* look at its checkpointed state from userland tools like zdb.
*/
#ifdef _KERNEL
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0)) {
#else
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0) &&
!spa_importing_readonly_checkpoint(spa)) {
#endif
spa_load_failed(spa, "a pool with guid %llu is already open",
(u_longlong_t)pool_guid);
return (SET_ERROR(EEXIST));
}
spa->spa_config_guid = pool_guid;
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
ASSERT(spa->spa_comment == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
ASSERT(spa->spa_compatibility == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
spa->spa_compatibility = spa_strdup(compatibility);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&spa->spa_config_txg);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
spa->spa_config_splitting = fnvlist_dup(nvl);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_VDEV_TREE);
return (SET_ERROR(EINVAL));
}
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Parse the configuration into a vdev tree. We explicitly set the
* value that will be returned by spa_version() since parsing the
* configuration requires knowing the version number.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "unable to parse config [error=%d]",
error);
return (error);
}
ASSERT(spa->spa_root_vdev == rvd);
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
if (type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_guid(spa) == pool_guid);
}
return (0);
}
/*
* Recursively open all vdevs in the vdev tree. This function is called twice:
* first with the untrusted config, then with the trusted config.
*/
static int
spa_ld_open_vdevs(spa_t *spa)
{
int error = 0;
/*
* spa_missing_tvds_allowed defines how many top-level vdevs can be
* missing/unopenable for the root vdev to still be considered openable.
*/
if (spa->spa_trust_config) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
} else {
spa->spa_missing_tvds_allowed = 0;
}
spa->spa_missing_tvds_allowed =
MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_open(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "vdev tree has %lld missing top-level "
"vdevs.", (u_longlong_t)spa->spa_missing_tvds);
if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
/*
* Although theoretically we could allow users to open
* incomplete pools in RW mode, we'd need to add a lot
* of extra logic (e.g. adjust pool space to account
* for missing vdevs).
* This limitation also prevents users from accidentally
* opening the pool in RW mode during data recovery and
* damaging it further.
*/
spa_load_note(spa, "pools with missing top-level "
"vdevs can only be opened in read-only mode.");
error = SET_ERROR(ENXIO);
} else {
spa_load_note(spa, "current settings allow for maximum "
"%lld missing top-level vdevs at this stage.",
(u_longlong_t)spa->spa_missing_tvds_allowed);
}
}
if (error != 0) {
spa_load_failed(spa, "unable to open vdev tree [error=%d]",
error);
}
if (spa->spa_missing_tvds != 0 || error != 0)
vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
return (error);
}
/*
* We need to validate the vdev labels against the configuration that
* we have in hand. This function is called twice: first with an untrusted
* config, then with a trusted config. The validation is more strict when the
* config is trusted.
*/
static int
spa_ld_validate_vdevs(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_validate(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
return (error);
}
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
spa_load_failed(spa, "cannot open vdev tree after invalidating "
"some vdevs");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
return (0);
}
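/*
 * Finish uberblock selection: mark the pool active and derive the first,
 * claim and verification txgs from the selected uberblock.
 */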
static void
spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
{
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
spa->spa_first_txg = spa->spa_last_ubsync_txg ?
spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
spa->spa_claim_max_txg = spa->spa_first_txg;
spa->spa_prev_software_version = ub->ub_software_version;
}
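/*
 * Find the best uberblock in the vdev labels, run the multihost (MMP)
 * activity check when required, and verify that the on-disk version and
 * the features required for read are supported by this build.
 */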
static int
spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
{
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *label;
uberblock_t *ub = &spa->spa_uberblock;
boolean_t activity_check = B_FALSE;
/*
* If we are opening the checkpointed state of the pool by
* rewinding to it, at this point we will have written the
* checkpointed uberblock to the vdev labels, so searching
* the labels will find the right uberblock. However, if
* we are opening the checkpointed state read-only, we have
* not modified the labels. Therefore, we must ignore the
* labels and continue using the spa_uberblock that was set
* by spa_ld_checkpoint_rewind.
*
* Note that it would be fine to ignore the labels when
* rewinding (opening writeable) as well. However, if we
* crash just after writing the labels, we will end up
* searching the labels. Doing so in the common case means
* that this code path gets exercised normally, rather than
* just in the edge case.
*/
if (ub->ub_checkpoint_txg != 0 &&
spa_importing_readonly_checkpoint(spa)) {
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
/*
* Find the best uberblock.
*/
vdev_uberblock_load(rvd, ub, &label);
/*
* If we weren't able to find a single valid uberblock, return failure.
*/
if (ub->ub_txg == 0) {
nvlist_free(label);
spa_load_failed(spa, "no valid uberblock found");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
}
if (spa->spa_load_max_txg != UINT64_MAX) {
(void) spa_import_progress_set_max_txg(spa_guid(spa),
(u_longlong_t)spa->spa_load_max_txg);
}
spa_load_note(spa, "using uberblock with txg=%llu",
(u_longlong_t)ub->ub_txg);
/*
* For pools which have the multihost property on, determine if the
* pool is truly inactive and can be safely imported. Prevent
* hosts which don't have a hostid set from importing the pool.
*/
activity_check = spa_activity_check_required(spa, ub, label,
spa->spa_config);
if (activity_check) {
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
spa_get_hostid(spa) == 0) {
nvlist_free(label);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
int error = spa_activity_check(spa, ub, spa->spa_config);
if (error) {
nvlist_free(label);
return (error);
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
fnvlist_add_uint16(spa->spa_load_info,
ZPOOL_CONFIG_MMP_SEQ,
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
}
/*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
nvlist_free(label);
spa_load_failed(spa, "version %llu is not supported",
(u_longlong_t)ub->ub_version);
return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
}
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *features;
/*
* If we weren't able to find what's necessary for reading the
* MOS in the label, return failure.
*/
if (label == NULL) {
spa_load_failed(spa, "label config unavailable");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) != 0) {
nvlist_free(label);
spa_load_failed(spa, "invalid label: '%s' missing",
ZPOOL_CONFIG_FEATURES_FOR_READ);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
/*
* Update our in-core representation with the definitive values
* from the label.
*/
nvlist_free(spa->spa_label_features);
VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
}
nvlist_free(label);
/*
* Look through entries in the label nvlist's features_for_read. If
* there is a feature listed there which we don't understand then we
* cannot open a pool.
*/
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *unsup_feat;
VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
0);
for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
NULL); nvp != NULL;
nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
if (!zfeature_is_supported(nvpair_name(nvp))) {
VERIFY(nvlist_add_string(unsup_feat,
nvpair_name(nvp), "") == 0);
}
}
if (!nvlist_empty(unsup_feat)) {
VERIFY(nvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
nvlist_free(unsup_feat);
spa_load_failed(spa, "some features are unsupported");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
nvlist_free(unsup_feat);
}
if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_try_repair(spa, spa->spa_config);
spa_config_exit(spa, SCL_ALL, FTAG);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
}
/*
* Initialize internal SPA structures.
*/
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
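/*
 * Open the DSL pool rooted at the selected uberblock so the MOS can be read.
 */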
static int
spa_ld_open_rootbp(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
if (error != 0) {
spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
return (0);
}
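/*
 * Load the trusted config stored in the MOS, rebuild the vdev tree from it,
 * then reopen and revalidate the vdevs against that config.
 */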
static int
spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t reloading)
{
vdev_t *mrvd, *rvd = spa->spa_root_vdev;
nvlist_t *nv, *mos_config, *policy;
int error = 0, copy_error;
uint64_t healthy_tvds, healthy_tvds_mos;
uint64_t mos_config_txg;
if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
!= 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* If we're assembling a pool from a split, the config provided is
* already trusted so there is nothing to do.
*/
if (type == SPA_IMPORT_ASSEMBLE)
return (0);
healthy_tvds = spa_healthy_core_tvds(spa);
if (load_nvlist(spa, spa->spa_config_object, &mos_config)
!= 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* If we are doing an open, the pool owner hasn't been verified yet, so
* do the verification here.
*/
if (spa->spa_load_state == SPA_LOAD_OPEN) {
error = spa_verify_host(spa, mos_config);
if (error != 0) {
nvlist_free(mos_config);
return (error);
}
}
nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Build a new vdev tree from the trusted config
*/
error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
if (error != 0) {
nvlist_free(mos_config);
spa_config_exit(spa, SCL_ALL, FTAG);
spa_load_failed(spa, "spa_config_parse failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Vdev paths in the MOS may be obsolete. If the untrusted config was
* obtained by scanning /dev/dsk, then it will have the right vdev
* paths. We update the trusted MOS config with this information.
* We first try to copy the paths with vdev_copy_path_strict, which
* succeeds only when both configs have exactly the same vdev tree.
* If that fails, we fall back to a more flexible method that has a
* best effort policy.
*/
copy_error = vdev_copy_path_strict(rvd, mrvd);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "provided vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
spa_load_note(spa, "MOS vdev tree:");
vdev_dbgmsg_print_tree(mrvd, 2);
}
if (copy_error != 0) {
spa_load_note(spa, "vdev_copy_path_strict failed, falling "
"back to vdev_copy_path_relaxed");
vdev_copy_path_relaxed(rvd, mrvd);
}
vdev_close(rvd);
vdev_free(rvd);
spa->spa_root_vdev = mrvd;
rvd = mrvd;
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* We will use spa_config if we decide to reload the spa or if spa_load
* fails and we rewind. We must thus regenerate the config using the
* MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
* pass settings on how to load the pool and is not stored in the MOS.
* We copy it over to our new, trusted config.
*/
mos_config_txg = fnvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_POOL_TXG);
nvlist_free(mos_config);
mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
&policy) == 0)
fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
spa_config_set(spa, mos_config);
spa->spa_config_source = SPA_CONFIG_SRC_MOS;
/*
* Now that we have the config from the MOS, we should be more strict
* in checking blkptrs and can make assumptions about the consistency
* of the vdev tree. spa_trust_config must be set to true before opening
* vdevs in order for them to be writeable.
*/
spa->spa_trust_config = B_TRUE;
/*
* Open and validate the new vdev tree
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "final vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
}
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
!spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
/*
* Sanity check to make sure that we are indeed loading the
* latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
* in the config provided and they happened to be the only ones
* to have the latest uberblock, we could involuntarily perform
* an extreme rewind.
*/
healthy_tvds_mos = spa_healthy_core_tvds(spa);
if (healthy_tvds_mos - healthy_tvds >=
SPA_SYNC_MIN_VDEVS) {
spa_load_note(spa, "config provided misses too many "
"top-level vdevs compared to MOS (%lld vs %lld). ",
(u_longlong_t)healthy_tvds,
(u_longlong_t)healthy_tvds_mos);
spa_load_note(spa, "vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
if (reloading) {
spa_load_failed(spa, "config was already "
"provided from MOS. Aborting.");
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_load_note(spa, "spa must be reloaded using MOS "
"config");
return (SET_ERROR(EAGAIN));
}
}
error = spa_check_for_missing_logs(spa);
if (error != 0)
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
"guid sum (%llu != %llu)",
(u_longlong_t)spa->spa_uberblock.ub_guid_sum,
(u_longlong_t)rvd->vdev_guid_sum);
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
ENXIO));
}
return (0);
}
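/*
 * Load the metadata needed for indirect (removed) vdevs: the removal state
 * and the state used to condense indirect vdev mappings.
 */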
static int
spa_ld_open_indirect_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* Everything that we read before spa_remove_init() must be stored
* on concrete vdevs. Therefore we do this as early as possible.
*/
error = spa_remove_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_remove_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Retrieve information needed to condense indirect vdev mappings.
*/
error = spa_condense_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_condense_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
return (0);
}
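/*
 * Check the feature lists stored in the MOS against the features supported
 * by this build and cache the feature reference counts in the spa.
 */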
static int
spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
if (spa_version(spa) >= SPA_VERSION_FEATURES) {
boolean_t missing_feat_read = B_FALSE;
nvlist_t *unsup_feat, *enabled_feat;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
&spa->spa_feat_for_read_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
&spa->spa_feat_for_write_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
&spa->spa_feat_desc_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
enabled_feat = fnvlist_alloc();
unsup_feat = fnvlist_alloc();
if (!spa_features_check(spa, B_FALSE,
unsup_feat, enabled_feat))
missing_feat_read = B_TRUE;
if (spa_writeable(spa) ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
if (!spa_features_check(spa, B_TRUE,
unsup_feat, enabled_feat)) {
*missing_feat_writep = B_TRUE;
}
}
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
}
fnvlist_free(enabled_feat);
fnvlist_free(unsup_feat);
if (!missing_feat_read) {
fnvlist_add_boolean(spa->spa_load_info,
ZPOOL_CONFIG_CAN_RDONLY);
}
/*
* If the state is SPA_LOAD_TRYIMPORT, our objective is
* twofold: to determine whether the pool is available for
* import in read-write mode and (if it is not) whether the
* pool is available for import in read-only mode. If the pool
* is available for import in read-write mode, it is displayed
* as available in userland; if it is not available for import
* in read-only mode, it is displayed as unavailable in
* userland. If the pool is available for import in read-only
* mode but not read-write mode, it is displayed as unavailable
* in userland with a special note that the pool is actually
* available for open in read-only mode.
*
* As a result, if the state is SPA_LOAD_TRYIMPORT and we are
* missing a feature for write, we must first determine whether
* the pool can be opened read-only before returning to
* userland in order to know whether to display the
* abovementioned note.
*/
if (missing_feat_read || (*missing_feat_writep &&
spa_writeable(spa))) {
spa_load_failed(spa, "pool uses unsupported features");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Load refcounts for ZFS features from disk into an in-memory
* cache during SPA initialization.
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
uint64_t refcount;
error = feature_get_refcount_from_disk(spa,
&spa_feature_table[i], &refcount);
if (error == 0) {
spa->spa_feat_refcount_cache[i] = refcount;
} else if (error == ENOTSUP) {
spa->spa_feat_refcount_cache[i] =
SPA_FEATURE_DISABLED;
} else {
spa_load_failed(spa, "error getting refcount "
"for feature %s [error=%d]",
spa_feature_table[i].fi_guid, error);
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
}
}
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
&spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Encryption was added before bookmark_v2, even though bookmark_v2
* is now a dependency. If this pool has encryption enabled without
* bookmark_v2, trigger an errata message.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
}
return (0);
}
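/*
 * Open the DSL pool's special directories needed by the dsl_pool layer.
 */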
static int
spa_ld_load_special_directories(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa->spa_is_initializing = B_TRUE;
error = dsl_pool_open(spa->spa_dsl_pool);
spa->spa_is_initializing = B_FALSE;
if (error != 0) {
spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
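/*
 * Retrieve pool-wide properties and related MOS objects (checksum salt,
 * deferred-frees bpobj, error logs, history, per-vdev ZAP map, pool
 * properties object).
 */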
static int
spa_ld_get_props(spa_t *spa)
{
int error = 0;
uint64_t obj;
vdev_t *rvd = spa->spa_root_vdev;
/* Grab the checksum salt from the MOS. */
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes);
if (error == ENOENT) {
/* Generate a new salt for subsequent use */
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
} else if (error != 0) {
spa_load_failed(spa, "unable to retrieve checksum salt from "
"MOS [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
if (error != 0) {
spa_load_failed(spa, "error opening deferred-frees bpobj "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Load the bit that tells us to use the new accounting function
* (raid-z deflation). If we have an older pool, this will not
* be present.
*/
error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
&spa->spa_creation_version, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the persistent error log. If we have an older pool, this will
* not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
&spa->spa_errlog_scrub, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the livelist deletion field. If a livelist is queued for
* deletion, indicate that in the spa.
*/
error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
&spa->spa_livelists_to_delete, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the history object. If we have an older pool, this
* will not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the per-vdev ZAP map. If we have an older pool, this will not
* be present; in this case, defer its creation to a later time to
* avoid dirtying the MOS this early / out of sync context. See
* spa_sync_config_object.
*/
/* The sentinel is only available in the MOS config. */
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
&spa->spa_all_vdev_zaps, B_FALSE);
if (error == ENOENT) {
VERIFY(!nvlist_exists(mos_config,
ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
} else if (error != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
/*
* An older version of ZFS overwrote the sentinel value, so
* we have orphaned per-vdev ZAPs in the MOS. Defer their
* destruction to later; see spa_sync_config_object.
*/
spa->spa_avz_action = AVZ_ACTION_DESTROY;
/*
* We're assuming that no vdevs have had their ZAPs created
* before this. Better be sure of it.
*/
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
}
nvlist_free(mos_config);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
B_FALSE);
if (error && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0) {
- uint64_t autoreplace;
+ uint64_t autoreplace = 0;
spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
spa->spa_autoreplace = (autoreplace != 0);
}
/*
* If we are importing a pool with missing top-level vdevs,
* we enforce that the pool doesn't panic or get suspended on
* error since the likelihood of missing data is extremely high.
*/
if (spa->spa_missing_tvds > 0 &&
spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_load_note(spa, "forcing failmode to 'continue' "
"as some top level vdevs are missing");
spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
}
return (0);
}
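/*
 * Load and open the auxiliary vdevs: hot spares and level 2 ARC (cache)
 * devices.
 */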
static int
spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're assembling the pool from the split-off vdevs of
* an existing pool, we don't want to attach the spares & cache
* devices.
*/
/*
* Load any hot spares for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
if (load_nvlist(spa, spa->spa_spares.sav_object,
&spa->spa_spares.sav_config) != 0) {
spa_load_failed(spa, "error loading spares nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Load any level 2 ARC devices for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
&spa->spa_l2cache.sav_object, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
if (load_nvlist(spa, spa->spa_l2cache.sav_object,
&spa->spa_l2cache.sav_config) != 0) {
spa_load_failed(spa, "error loading l2cache nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_l2cache.sav_sync = B_TRUE;
}
return (0);
}
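/*
 * Load per-vdev metadata (metaslabs, DTLs, space maps), after enforcing the
 * multihost hostid requirement and handling autoreplace checks.
 */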
static int
spa_ld_load_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If the 'multihost' property is set, then never allow a pool to
* be imported when the system hostid is zero. The exception to
* this rule is zdb, which is always allowed to access pools.
*/
if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
(spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
/*
* If the 'autoreplace' property is set, then post a resource notifying
* the ZFS DE that it should not issue any faults for unopenable
* devices. We also iterate over the vdevs, and post a sysevent for any
* unopenable vdevs so that the normal autoreplace handler can take
* over.
*/
if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_check_removed(spa->spa_root_vdev);
/*
* For the import case, this is done in spa_import(), because
* at this point we're using the spare definitions from
* the MOS config, not necessarily from the userland config.
*/
if (spa->spa_load_state != SPA_LOAD_IMPORT) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
}
/*
* Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
*/
error = vdev_load(rvd);
if (error != 0) {
spa_load_failed(spa, "vdev_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
error = spa_ld_log_spacemaps(spa);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_data failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Propagate the leaf DTLs we just loaded all the way up the vdev tree.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
spa_config_exit(spa, SCL_ALL, FTAG);
return (0);
}
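/*
 * Load the on-disk deduplication tables (DDTs).
 */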
static int
spa_ld_load_dedup_tables(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = ddt_load(spa);
if (error != 0) {
spa_load_failed(spa, "ddt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
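/*
 * Verify the intent logs. A failed log check is fatal unless top-level vdevs
 * are already known to be missing, in which case the logs are dropped.
 */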
static int
spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport)
{
vdev_t *rvd = spa->spa_root_vdev;
if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
boolean_t missing = spa_check_logs(spa);
if (missing) {
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "spa_check_logs failed "
"so dropping the logs");
} else {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
spa_load_failed(spa, "spa_check_logs failed");
return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
ENXIO));
}
}
}
return (0);
}
static int
spa_ld_verify_pool_data(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We've successfully opened the pool, verify that we're ready
* to start pushing transactions.
*/
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
error = spa_load_verify(spa);
if (error != 0) {
spa_load_failed(spa, "spa_load_verify failed "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
error));
}
}
return (0);
}
static void
spa_ld_claim_log_blocks(spa_t *spa)
{
dmu_tx_t *tx;
dsl_pool_t *dp = spa_get_dsl(spa);
/*
* Claim log blocks that haven't been committed yet.
* This must all happen in a single txg.
* Note: spa_claim_max_txg is updated by spa_claim_notify(),
* invoked from zil_claim_log_block()'s i/o done callback.
* Price of rollback is that we abandon the log.
*/
spa->spa_claiming = B_TRUE;
tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_claim, tx, DS_FIND_CHILDREN);
dmu_tx_commit(tx);
spa->spa_claiming = B_FALSE;
spa_set_log_state(spa, SPA_LOG_GOOD);
}
static void
spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
boolean_t update_config_cache)
{
vdev_t *rvd = spa->spa_root_vdev;
int need_update = B_FALSE;
/*
* If the config cache is stale, or we have uninitialized
* metaslabs (see spa_vdev_add()), then update the config.
*
* If this is a verbatim import, trust the current
* in-core spa_config and update the disk labels.
*/
if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_RECOVER ||
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
for (int c = 0; c < rvd->vdev_children; c++)
if (rvd->vdev_child[c]->vdev_ms_array == 0)
need_update = B_TRUE;
/*
* Update the config cache asynchronously in case we're the
* root pool, in which case the config cache isn't writable yet.
*/
if (need_update)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
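/*
 * Tear down and reactivate the spa so the load can be restarted, e.g. with
 * the trusted config or the checkpointed uberblock.
 */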
static void
spa_ld_prepare_for_reload(spa_t *spa)
{
spa_mode_t mode = spa->spa_mode;
int async_suspended = spa->spa_async_suspended;
spa_unload(spa);
spa_deactivate(spa);
spa_activate(spa, mode);
/*
* We save the value of spa_async_suspended as it gets reset to 0 by
* spa_unload(). We want to restore it to the original value before
* returning, as we might be calling spa_async_resume() later.
*/
spa->spa_async_suspended = async_suspended;
}
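/*
 * If the pool has a checkpoint, read the checkpointed uberblock from the MOS
 * and record its txg and timestamp in the spa.
 */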
static int
spa_ld_read_checkpoint_txg(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT0(spa->spa_checkpoint_txg);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT)
return (0);
if (error != 0)
return (error);
ASSERT3U(checkpoint.ub_txg, !=, 0);
ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
ASSERT3U(checkpoint.ub_timestamp, !=, 0);
spa->spa_checkpoint_txg = checkpoint.ub_txg;
spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
return (0);
}
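/*
 * First phase of a load: parse the config, open and validate the vdevs,
 * select the best uberblock and open the MOS root block pointer.
 */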
static int
spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
/*
* Never trust the config that is provided unless we are assembling
* a pool following a split.
* This means don't trust blkptrs and the vdev tree in general. This
* also effectively puts the spa in read-only mode since
* spa_writeable() checks for spa_trust_config to be true.
* We will later load a trusted config from the MOS.
*/
if (type != SPA_IMPORT_ASSEMBLE)
spa->spa_trust_config = B_FALSE;
/*
* Parse the config provided to create a vdev tree.
*/
error = spa_ld_parse_config(spa, type);
if (error != 0)
return (error);
spa_import_progress_add(spa);
/*
* Now that we have the vdev tree, try to open each vdev. This involves
* opening the underlying physical device, retrieving its geometry and
* probing the vdev with a dummy I/O. The state of each vdev will be set
* based on the success of those operations. After this we'll be ready
* to read from the vdevs.
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
/*
* Read the label of each vdev and make sure that the GUIDs stored
* there match the GUIDs in the config provided.
* If we're assembling a new pool that's been split off from an
* existing pool, the labels haven't yet been updated so we skip
* validation for now.
*/
if (type != SPA_IMPORT_ASSEMBLE) {
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
}
/*
* Read all vdev labels to find the best uberblock (i.e. latest,
* unless spa_load_max_txg is set) and store it in spa_uberblock. We
* get the list of features required to read blkptrs in the MOS from
* the vdev label with the best uberblock and verify that our version
* of zfs supports them all.
*/
error = spa_ld_select_uberblock(spa, type);
if (error != 0)
return (error);
/*
* Pass that uberblock to the dsl_pool layer which will open the root
* blkptr. This blkptr points to the latest version of the MOS and will
* allow us to read its contents.
*/
error = spa_ld_open_rootbp(spa);
if (error != 0)
return (error);
return (0);
}
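/*
 * Rewind the pool to its checkpointed uberblock. If the pool is writeable,
 * also sync that uberblock to the vdev labels, making the rewind permanent.
 */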
static int
spa_ld_checkpoint_rewind(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error != 0) {
spa_load_failed(spa, "unable to retrieve checkpointed "
"uberblock from the MOS config [error=%d]", error);
if (error == ENOENT)
error = ZFS_ERR_NO_CHECKPOINT;
return (error);
}
ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
/*
* We need to update the txg and timestamp of the checkpointed
* uberblock to be higher than the latest one. This ensures that
* the checkpointed uberblock is selected if we were to close and
* reopen the pool right after we've written it in the vdev labels.
* (also see block comment in vdev_uberblock_compare)
*/
checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
checkpoint.ub_timestamp = gethrestime_sec();
/*
* Set current uberblock to be the checkpointed uberblock.
*/
spa->spa_uberblock = checkpoint;
/*
* If we are doing a normal rewind, then the pool is open for
* writing and we sync the "updated" checkpointed uberblock to
* disk. Once this is done, we've basically rewound the whole
* pool and there is no way back.
*
* There are cases when we don't want to attempt to sync the
* checkpointed uberblock to disk because we are opening a
* pool as read-only. Specifically, verifying the checkpointed
* state with zdb, and importing the checkpointed state to get
* a "preview" of its content.
*/
if (spa_writeable(spa)) {
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "failed to write checkpointed "
"uberblock to the vdev labels [error=%d]", error);
return (error);
}
}
return (0);
}
static int
spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t *update_config_cache)
{
int error;
/*
* Parse the config for pool, open and validate vdevs,
* select an uberblock, and use that uberblock to open
* the MOS.
*/
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
/*
* Retrieve the trusted config stored in the MOS and use it to create
* a new, exact version of the vdev tree, then reopen all vdevs.
*/
error = spa_ld_trusted_config(spa, type, B_FALSE);
if (error == EAGAIN) {
if (update_config_cache != NULL)
*update_config_cache = B_TRUE;
/*
* Redo the loading process with the trusted config if it is
* too different from the untrusted config.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "RELOADING");
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
error = spa_ld_trusted_config(spa, type, B_TRUE);
if (error != 0)
return (error);
} else if (error != 0) {
return (error);
}
return (0);
}
/*
* Load an existing storage pool, using the config provided. This config
* describes which vdevs are part of the pool and is later validated against
* partial configs present in each vdev's label and an entire copy of the
* config stored in the MOS.
*/
static int
spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
{
int error = 0;
boolean_t missing_feat_write = B_FALSE;
boolean_t checkpoint_rewind =
(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
boolean_t update_config_cache = B_FALSE;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
spa_load_note(spa, "LOADING");
error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
if (error != 0)
return (error);
/*
* If we are rewinding to the checkpoint then we need to repeat
* everything we've done so far in this function but this time
* selecting the checkpointed uberblock and using that to open
* the MOS.
*/
if (checkpoint_rewind) {
/*
* If we are rewinding to the checkpoint, update the config cache
* anyway.
*/
update_config_cache = B_TRUE;
/*
* Extract the checkpointed uberblock from the current MOS
* and use this as the pool's uberblock from now on. If the
* pool is imported as writeable we also write the checkpoint
* uberblock to the labels, making the rewind permanent.
*/
error = spa_ld_checkpoint_rewind(spa);
if (error != 0)
return (error);
/*
* Redo the loading process again with the
* checkpointed uberblock.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "LOADING checkpointed uberblock");
error = spa_ld_mos_with_trusted_config(spa, type, NULL);
if (error != 0)
return (error);
}
/*
* Retrieve the checkpoint txg if the pool has a checkpoint.
*/
error = spa_ld_read_checkpoint_txg(spa);
if (error != 0)
return (error);
/*
* Retrieve the mapping of indirect vdevs. Those vdevs were removed
* from the pool and their contents were re-mapped to other vdevs. Note
* that everything that we read before this step must have been
* rewritten on concrete vdevs after the last device removal was
* initiated. Otherwise we could be reading from indirect vdevs before
* we have loaded their mappings.
*/
error = spa_ld_open_indirect_vdev_metadata(spa);
if (error != 0)
return (error);
/*
* Retrieve the full list of active features from the MOS and check if
* they are all supported.
*/
error = spa_ld_check_features(spa, &missing_feat_write);
if (error != 0)
return (error);
/*
* Load several special directories from the MOS needed by the dsl_pool
* layer.
*/
error = spa_ld_load_special_directories(spa);
if (error != 0)
return (error);
/*
* Retrieve pool properties from the MOS.
*/
error = spa_ld_get_props(spa);
if (error != 0)
return (error);
/*
* Retrieve the list of auxiliary devices - cache devices and spares -
* and open them.
*/
error = spa_ld_open_aux_vdevs(spa, type);
if (error != 0)
return (error);
/*
* Load the metadata for all vdevs. Also check if unopenable devices
* should be autoreplaced.
*/
error = spa_ld_load_vdev_metadata(spa);
if (error != 0)
return (error);
error = spa_ld_load_dedup_tables(spa);
if (error != 0)
return (error);
/*
* Verify the logs now to make sure we don't have any unexpected errors
* when we claim log blocks later.
*/
error = spa_ld_verify_logs(spa, type, ereport);
if (error != 0)
return (error);
if (missing_feat_write) {
ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
/*
* At this point, we know that we can open the pool in
* read-only mode but not read-write mode. We now have enough
* information and can return to userland.
*/
return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Traverse the last txgs to make sure the pool was left off in a safe
* state. When performing an extreme rewind, we verify the whole pool,
* which can take a very long time.
*/
error = spa_ld_verify_pool_data(spa);
if (error != 0)
return (error);
/*
* Calculate the deflated space for the pool. This must be done before
* we write anything to the pool because we'd need to update the space
* accounting using the deflated sizes.
*/
spa_update_dspace(spa);
/*
* We have now retrieved all the information we needed to open the
* pool. If we are importing the pool in read-write mode, a few
* additional steps must be performed to finish the import.
*/
if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
spa->spa_load_max_txg == UINT64_MAX)) {
uint64_t config_cache_txg = spa->spa_config_txg;
ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
/*
* In case of a checkpoint rewind, log the original txg
* of the checkpointed uberblock.
*/
if (checkpoint_rewind) {
spa_history_log_internal(spa, "checkpoint rewind",
NULL, "rewound state to txg=%llu",
(u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
}
/*
* Traverse the ZIL and claim all blocks.
*/
spa_ld_claim_log_blocks(spa);
/*
* Kick-off the syncing thread.
*/
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
mmp_thread_start(spa);
/*
* Wait for all claims to sync. We sync up to the highest
* claimed log block birth time so that claimed log blocks
* don't appear to be from the future. spa_claim_max_txg
* will have been set for us by ZIL traversal operations
* performed above.
*/
txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
/*
* Check if we need to request an update of the config. On the
* next sync, we would update the config stored in vdev labels
* and the cachefile (by default /etc/zfs/zpool.cache).
*/
spa_ld_check_for_config_update(spa, config_cache_txg,
update_config_cache);
/*
* Check if a rebuild was in progress and if so resume it.
* Then check all DTLs to see if anything needs resilvering.
* The resilver will be deferred if a rebuild was started.
*/
if (vdev_rebuild_active(spa->spa_root_vdev)) {
vdev_rebuild_restart(spa);
} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/*
* Log the fact that we booted up (so that we can detect if
* we rebooted in the middle of an operation).
*/
spa_history_log_version(spa, "open", NULL);
spa_restart_removal(spa);
spa_spawn_aux_threads(spa);
/*
* Delete any inconsistent datasets.
*
* Note:
* Since we may be issuing deletes for clones here,
* we make sure to do so after we've spawned all the
* auxiliary threads above (of which the livelist
* deletion zthr is a part).
*/
(void) dmu_objset_find(spa_name(spa),
dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
/*
* Clean up any stale temporary dataset userrefs.
*/
dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
vdev_trim_restart(spa->spa_root_vdev);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_import_progress_remove(spa_guid(spa));
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_load_note(spa, "LOADED");
return (0);
}
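/*
 * Unload and reactivate the spa, then retry spa_load() capped at the txg
 * just before the previously selected uberblock (used for pool rewind).
 */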
static int
spa_load_retry(spa_t *spa, spa_load_state_t state)
{
spa_mode_t mode = spa->spa_mode;
spa_unload(spa);
spa_deactivate(spa);
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
spa_activate(spa, mode);
spa_async_suspend(spa);
spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
(u_longlong_t)spa->spa_load_max_txg);
return (spa_load(spa, state, SPA_IMPORT_EXISTING));
}
/*
* If spa_load() fails this function will try loading prior txg's. If
* 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
* will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
* function will not rewind the pool and will return the same error as
* spa_load().
*/
static int
spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
int rewind_flags)
{
nvlist_t *loadinfo = NULL;
nvlist_t *config = NULL;
int load_error, rewind_error;
uint64_t safe_rewind_txg;
uint64_t min_txg;
if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
spa->spa_load_max_txg = spa->spa_load_txg;
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
spa->spa_load_max_txg = max_request;
if (max_request != UINT64_MAX)
spa->spa_extreme_rewind = B_TRUE;
}
load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
if (load_error == 0)
return (0);
if (load_error == ZFS_ERR_NO_CHECKPOINT) {
/*
* When attempting checkpoint-rewind on a pool with no
* checkpoint, we should not attempt to load uberblocks
* from previous txgs when spa_load fails.
*/
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (spa->spa_root_vdev != NULL)
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
if (rewind_flags & ZPOOL_NEVER_REWIND) {
nvlist_free(config);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (state == SPA_LOAD_RECOVER) {
/* Price of rolling back is discarding txgs, including log */
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
/*
* If we aren't rolling back save the load info from our first
* import attempt so that we can restore it after attempting
* to rewind.
*/
loadinfo = spa->spa_load_info;
spa->spa_load_info = fnvlist_alloc();
}
spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
TXG_INITIAL : safe_rewind_txg;
/*
* Continue as long as we're finding errors, we're still within
* the acceptable rewind range, and we're still finding uberblocks
*/
while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
if (spa->spa_load_max_txg < safe_rewind_txg)
spa->spa_extreme_rewind = B_TRUE;
rewind_error = spa_load_retry(spa, state);
}
spa->spa_extreme_rewind = B_FALSE;
spa->spa_load_max_txg = UINT64_MAX;
if (config && (rewind_error || state != SPA_LOAD_RECOVER))
spa_config_set(spa, config);
else
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
ASSERT3P(loadinfo, ==, NULL);
spa_import_progress_remove(spa_guid(spa));
return (rewind_error);
} else {
/* Store the rewind info as part of the initial load info */
fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
spa->spa_load_info);
/* Restore the initial load info */
fnvlist_free(spa->spa_load_info);
spa->spa_load_info = loadinfo;
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
}
/*
* Pool Open/Import
*
* The import case is identical to an open except that the configuration is sent
* down from userland, instead of grabbed from the configuration cache. For the
* case of an open, the pool configuration will exist in the
* POOL_STATE_UNINITIALIZED state.
*
* The stats information (gen/count/ustats) is used to gather vdev statistics
* at the same time as we open the pool, without having to keep around the
* spa_t in some ambiguous state.
*/
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
nvlist_t **config)
{
spa_t *spa;
spa_load_state_t state = SPA_LOAD_OPEN;
int error;
int locked = B_FALSE;
int firstopen = B_FALSE;
*spapp = NULL;
/*
* As disgusting as this is, we need to support recursive calls to this
* function because dsl_dir_open() is called during spa_load(), and ends
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
locked = B_TRUE;
}
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
zpool_load_policy_t policy;
firstopen = B_TRUE;
zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
&policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa_activate(spa, spa_mode_global);
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
zfs_dbgmsg("spa_open_common: opening %s", pool);
error = spa_load_best(spa, state, policy.zlp_txg,
policy.zlp_rewind);
if (error == EBADF) {
/*
* If vdev_validate() fails with EBADF, one of the vdevs
* indicates that the pool has been exported or destroyed.
* If this is the case, the config cache is out of sync
* and we should remove the pool from the namespace.
*/
spa_unload(spa);
spa_deactivate(spa);
spa_write_cachefile(spa, B_TRUE, B_TRUE);
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (error) {
/*
* We can't open the pool, but we still have useful
* information: the state of each vdev after the
* attempted vdev_open(). Return this to the user.
*/
if (config != NULL && spa->spa_config) {
VERIFY(nvlist_dup(spa->spa_config, config,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist(*config,
ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
}
spa_unload(spa);
spa_deactivate(spa);
spa->spa_last_open_failed = error;
if (locked)
mutex_exit(&spa_namespace_lock);
*spapp = NULL;
return (error);
}
}
spa_open_ref(spa, tag);
if (config != NULL)
*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
/*
* If we've recovered the pool, pass back any information we
* gathered while doing the load.
*/
if (state == SPA_LOAD_RECOVER) {
VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
}
if (locked) {
spa->spa_last_open_failed = 0;
spa->spa_last_ubsync_txg = 0;
spa->spa_load_txg = 0;
mutex_exit(&spa_namespace_lock);
}
if (firstopen)
zvol_create_minors_recursive(spa_name(spa));
*spapp = spa;
return (0);
}
int
spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
nvlist_t **config)
{
return (spa_open_common(name, spapp, tag, policy, config));
}
int
spa_open(const char *name, spa_t **spapp, void *tag)
{
return (spa_open_common(name, spapp, tag, NULL, NULL));
}
/*
* Lookup the given spa_t, incrementing the inject count in the process,
* preventing it from being exported or destroyed.
*/
spa_t *
spa_inject_addref(char *name)
{
spa_t *spa;
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (NULL);
}
spa->spa_inject_ref++;
mutex_exit(&spa_namespace_lock);
return (spa);
}
void
spa_inject_delref(spa_t *spa)
{
mutex_enter(&spa_namespace_lock);
spa->spa_inject_ref--;
mutex_exit(&spa_namespace_lock);
}
/*
* Add spares device information to the nvlist.
*/
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
nvlist_t **spares;
uint_t i, nspares;
nvlist_t *nvroot;
uint64_t guid;
vdev_stat_t *vs;
uint_t vsc;
uint64_t pool;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_spares.sav_count == 0)
return;
VERIFY(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
if (nspares != 0) {
VERIFY(nvlist_add_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
VERIFY(nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
/*
* Go through and find any spares which have since been
* repurposed as active spares. If this is the case, update
* their status appropriately.
*/
for (i = 0; i < nspares; i++) {
VERIFY(nvlist_lookup_uint64(spares[i],
ZPOOL_CONFIG_GUID, &guid) == 0);
if (spa_spare_exists(guid, &pool, NULL) &&
pool != 0ULL) {
VERIFY(nvlist_lookup_uint64_array(
spares[i], ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
vs->vs_state = VDEV_STATE_CANT_OPEN;
vs->vs_aux = VDEV_AUX_SPARED;
}
}
}
}
/*
* Add l2cache device information to the nvlist, including vdev stats.
*/
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
nvlist_t **l2cache;
uint_t i, j, nl2cache;
nvlist_t *nvroot;
uint64_t guid;
vdev_t *vd;
vdev_stat_t *vs;
uint_t vsc;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_l2cache.sav_count == 0)
return;
VERIFY(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
if (nl2cache != 0) {
VERIFY(nvlist_add_nvlist_array(nvroot,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
VERIFY(nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
/*
* Update level 2 cache device stats.
*/
for (i = 0; i < nl2cache; i++) {
VERIFY(nvlist_lookup_uint64(l2cache[i],
ZPOOL_CONFIG_GUID, &guid) == 0);
vd = NULL;
for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
if (guid ==
spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
vd = spa->spa_l2cache.sav_vdevs[j];
break;
}
}
ASSERT(vd != NULL);
VERIFY(nvlist_lookup_uint64_array(l2cache[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
== 0);
vdev_get_stats(vd, vs);
vdev_config_generate_stats(vd, l2cache[i]);
}
}
}
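/*
 * Read the for-read and for-write feature reference counts from the
 * MOS feature objects and add them to the 'features' nvlist.
 */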
static void
spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
{
zap_cursor_t zc;
zap_attribute_t za;
if (spa->spa_feat_for_read_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_read_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
if (spa->spa_feat_for_write_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_write_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
}
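/*
 * Refresh the 'features' nvlist from the in-core feature reference
 * counts, avoiding any I/O to the pool.
 */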
static void
spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
{
int i;
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t feature = spa_feature_table[i];
uint64_t refcount;
if (feature_get_refcount(spa, &feature, &refcount) != 0)
continue;
VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
}
}
/*
* Store a list of pool features and their reference counts in the
* config.
*
* The first time this is called on a spa, allocate a new nvlist, fetch
* the pool features and reference counts from disk, then save the list
* in the spa. In subsequent calls on the same spa use the saved nvlist
* and refresh its values from the cached reference counts. This
* ensures we don't block here on I/O on a suspended pool so 'zpool
* clear' can resume the pool.
*/
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
nvlist_t *features;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
mutex_enter(&spa->spa_feat_stats_lock);
features = spa->spa_feat_stats;
if (features != NULL) {
spa_feature_stats_from_cache(spa, features);
} else {
VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
spa->spa_feat_stats = features;
spa_feature_stats_from_disk(spa, features);
}
VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
features));
mutex_exit(&spa->spa_feat_stats_lock);
}
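/*
 * Generate the current configuration for the named pool, including
 * spare, l2cache, and feature statistics, and copy out its altroot.
 */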
int
spa_get_stats(const char *name, nvlist_t **config,
char *altroot, size_t buflen)
{
int error;
spa_t *spa;
*config = NULL;
error = spa_open_common(name, &spa, FTAG, NULL, config);
if (spa != NULL) {
/*
* This still leaves a window of inconsistency where the spares
* or l2cache devices could change and the config would be
* self-inconsistent.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
if (*config != NULL) {
uint64_t loadtimes[2];
loadtimes[0] = spa->spa_loaded_ts.tv_sec;
loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
VERIFY(nvlist_add_uint64_array(*config,
ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_ERRCOUNT,
spa_get_errlog_size(spa)) == 0);
if (spa_suspended(spa)) {
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED,
spa->spa_failmode) == 0);
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED_REASON,
spa->spa_suspended) == 0);
}
spa_add_spares(spa, *config);
spa_add_l2cache(spa, *config);
spa_add_feature_stats(spa, *config);
}
}
/*
* We want to get the alternate root even for faulted pools, so we cheat
* and call spa_lookup() directly.
*/
if (altroot) {
if (spa == NULL) {
mutex_enter(&spa_namespace_lock);
spa = spa_lookup(name);
if (spa)
spa_altroot(spa, altroot, buflen);
else
altroot[0] = '\0';
spa = NULL;
mutex_exit(&spa_namespace_lock);
} else {
spa_altroot(spa, altroot, buflen);
}
}
if (spa != NULL) {
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_close(spa, FTAG);
}
return (error);
}
/*
* Validate that the auxiliary device array is well formed. We must have an
* array of nvlists, each of which describes a valid leaf vdev. If this is an
* import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
* specified, as long as they are well-formed.
*/
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
spa_aux_vdev_t *sav, const char *config, uint64_t version,
vdev_labeltype_t label)
{
nvlist_t **dev;
uint_t i, ndev;
vdev_t *vd;
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* It's acceptable to have no devs specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
return (0);
if (ndev == 0)
return (SET_ERROR(EINVAL));
/*
* Make sure the pool is formatted with a version that supports this
* device type.
*/
if (spa_version(spa) < version)
return (SET_ERROR(ENOTSUP));
/*
* Set the pending device list so we correctly handle device in-use
* checking.
*/
sav->sav_pending = dev;
sav->sav_npending = ndev;
for (i = 0; i < ndev; i++) {
if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
mode)) != 0)
goto out;
if (!vd->vdev_ops->vdev_op_leaf) {
vdev_free(vd);
error = SET_ERROR(EINVAL);
goto out;
}
vd->vdev_top = vd;
if ((error = vdev_open(vd)) == 0 &&
(error = vdev_label_init(vd, crtxg, label)) == 0) {
VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
vd->vdev_guid) == 0);
}
vdev_free(vd);
if (error &&
(mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
goto out;
else
error = 0;
}
out:
sav->sav_pending = NULL;
sav->sav_npending = 0;
return (error);
}
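/*
 * Validate both the spare and the l2cache device arrays described by
 * the given vdev tree.
 */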
static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
VDEV_LABEL_SPARE)) != 0) {
return (error);
}
return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
VDEV_LABEL_L2CACHE));
}
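/*
 * Install a new list of auxiliary devices (spares or l2cache) in
 * sav_config, concatenating it with any devices already present.
 */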
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
const char *config)
{
int i;
if (sav->sav_config != NULL) {
nvlist_t **olddevs;
uint_t oldndevs;
nvlist_t **newdevs;
/*
* Generate new dev list by concatenating with the
* current dev list.
*/
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
&olddevs, &oldndevs) == 0);
newdevs = kmem_alloc(sizeof (void *) *
(ndevs + oldndevs), KM_SLEEP);
for (i = 0; i < oldndevs; i++)
VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
KM_SLEEP) == 0);
for (i = 0; i < ndevs; i++)
VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
KM_SLEEP) == 0);
VERIFY(nvlist_remove(sav->sav_config, config,
DATA_TYPE_NVLIST_ARRAY) == 0);
VERIFY(nvlist_add_nvlist_array(sav->sav_config,
config, newdevs, ndevs + oldndevs) == 0);
for (i = 0; i < oldndevs + ndevs; i++)
nvlist_free(newdevs[i]);
kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
} else {
/*
* Generate a new dev list.
*/
VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
devs, ndevs) == 0);
}
}
/*
* Stop and drop level 2 ARC devices
*/
void
spa_l2cache_drop(spa_t *spa)
{
vdev_t *vd;
int i;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
for (i = 0; i < sav->sav_count; i++) {
uint64_t pool;
vd = sav->sav_vdevs[i];
ASSERT(vd != NULL);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
}
}
/*
* Verify encryption parameters for spa creation. If we are encrypting, we must
* have the encryption feature flag enabled.
*/
static int
spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
boolean_t has_encryption)
{
if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
!has_encryption)
return (SET_ERROR(ENOTSUP));
return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
}
/*
* Pool Creation
*/
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, dsl_crypto_params_t *dcp)
{
spa_t *spa;
char *altroot = NULL;
vdev_t *rvd;
dsl_pool_t *dp;
dmu_tx_t *tx;
int error = 0;
uint64_t txg = TXG_INITIAL;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
uint64_t version, obj, ndraid = 0;
boolean_t has_features;
boolean_t has_encryption;
boolean_t has_allocclass;
spa_feature_t feat;
char *feat_name;
char *poolname;
nvlist_t *nvl;
if (props == NULL ||
nvlist_lookup_string(props, "tname", &poolname) != 0)
poolname = (char *)pool;
/*
* If this pool already exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(poolname) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Allocate a new spa_t structure.
*/
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
spa = spa_add(poolname, nvl, altroot);
fnvlist_free(nvl);
spa_activate(spa, spa_mode_global);
if (props && (error = spa_prop_validate(spa, props))) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Temporary pool names should never be written to disk.
*/
if (poolname != pool)
spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
has_features = B_FALSE;
has_encryption = B_FALSE;
has_allocclass = B_FALSE;
for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
if (zpool_prop_feature(nvpair_name(elem))) {
has_features = B_TRUE;
feat_name = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(feat_name, &feat));
if (feat == SPA_FEATURE_ENCRYPTION)
has_encryption = B_TRUE;
if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
has_allocclass = B_TRUE;
}
}
/* verify encryption params, if they were provided */
if (dcp != NULL) {
error = spa_create_check_encryption_params(dcp, has_encryption);
if (error != 0) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
}
if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (ENOTSUP);
}
if (has_features || nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
version = SPA_VERSION;
}
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
spa->spa_first_txg = txg;
spa->spa_uberblock.ub_txg = txg - 1;
spa->spa_uberblock.ub_version = version;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_load_state = SPA_LOAD_CREATE;
spa->spa_removing_phys.sr_state = DSS_NONE;
spa->spa_removing_phys.sr_removing_vdev = -1;
spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
spa->spa_indirect_vdevs_loaded = B_TRUE;
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Create the root vdev.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
ASSERT(error != 0 || rvd != NULL);
ASSERT(error != 0 || spa->spa_root_vdev == rvd);
if (error == 0 && !zfs_allocatable_devs(nvroot))
error = SET_ERROR(EINVAL);
if (error == 0 &&
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
(error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 &&
(error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
/*
* Instantiate the metaslab groups (this will dirty the vdevs);
* we can no longer error exit past this point.
*/
for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_metaslab_set_size(vd);
vdev_expand(vd, txg);
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Get the list of spares, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Get the list of level 2 cache devices, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
spa->spa_is_initializing = B_TRUE;
spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
spa->spa_is_initializing = B_FALSE;
/*
* Create DDTs (dedup tables).
*/
ddt_create(spa);
spa_update_dspace(spa);
tx = dmu_tx_create_assigned(dp, txg);
/*
* Create the pool's history object.
*/
if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
spa_history_create_obj(spa, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
spa_history_log_version(spa, "create", tx);
/*
* Create the pool config object.
*/
spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool config");
}
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
sizeof (uint64_t), 1, &version, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool version");
}
/* Newly created pools with the right version are always deflated. */
if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
spa->spa_deflate = TRUE;
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
cmn_err(CE_PANIC, "failed to add deflate");
}
}
/*
* Create the deferred-free bpobj. Turn off compression
* because sync-to-convergence takes longer if the blocksize
* keeps changing.
*/
obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
dmu_object_set_compress(spa->spa_meta_objset, obj,
ZIO_COMPRESS_OFF, tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
sizeof (uint64_t), 1, &obj, tx) != 0) {
cmn_err(CE_PANIC, "failed to add bpobj");
}
VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
spa->spa_meta_objset, obj));
/*
* Generate some random noise for salted checksums to operate on.
*/
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
/*
* Set pool properties.
*/
spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
spa_sync_props(props, tx);
}
for (int i = 0; i < ndraid; i++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
dmu_tx_commit(tx);
spa->spa_sync_on = B_TRUE;
txg_sync_start(dp);
mmp_thread_start(spa);
txg_wait_synced(dp, txg);
spa_spawn_aux_threads(spa);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
spa->spa_load_state = SPA_LOAD_NONE;
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Import a non-root pool into the system.
*/
int
spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
{
spa_t *spa;
char *altroot = NULL;
spa_load_state_t state = SPA_LOAD_IMPORT;
zpool_load_policy_t policy;
spa_mode_t mode = spa_mode_global;
uint64_t readonly = B_FALSE;
int error;
nvlist_t *nvroot;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
/*
* If a pool with this name exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Create and initialize the spa structure.
*/
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
(void) nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly)
mode = SPA_MODE_READ;
spa = spa_add(pool, config, altroot);
spa->spa_import_flags = flags;
/*
* Verbatim import - Take a pool and insert it into the namespace
* as if it had been loaded at boot.
*/
if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
zfs_dbgmsg("spa_import: verbatim import of %s", pool);
mutex_exit(&spa_namespace_lock);
return (0);
}
spa_activate(spa, mode);
/*
* Don't start async tasks until we know everything is healthy.
*/
spa_async_suspend(spa);
zpool_get_load_policy(config, &policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
if (state != SPA_LOAD_RECOVER) {
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
zfs_dbgmsg("spa_import: importing %s", pool);
} else {
zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
"(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
}
error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
/*
* Propagate anything learned while loading the pool and pass it
* back to the caller (i.e. rewind info, missing devices, etc).
*/
VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Toss any existing sparelist, as it doesn't have any validity
* anymore, and conflicts with spa_has_spare().
*/
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
spa_load_spares(spa);
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
spa_load_l2cache(spa);
}
VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
spa_config_exit(spa, SCL_ALL, FTAG);
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
if (error != 0 || (props && spa_writeable(spa) &&
(error = spa_prop_set(spa, props)))) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
spa_async_resume(spa);
/*
* Override any spares and level 2 cache devices as specified by
* the user, as these may have correct device names/devids, etc.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
if (spa->spa_spares.sav_config)
VERIFY(nvlist_remove(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
else
VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
if (spa->spa_l2cache.sav_config)
VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
else
VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* Check for any removed devices.
*/
if (spa->spa_autoreplace) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
if (spa_writeable(spa)) {
/*
* Update the config cache to include the newly-imported pool.
*/
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
}
/*
* It's possible that the pool was expanded while it was exported.
* We kick off an async task to handle this for us.
*/
spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
spa_history_log_version(spa, "import", NULL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
zvol_create_minors_recursive(pool);
return (0);
}
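/*
 * Probe an importable pool described by 'tryconfig' without actually
 * importing it: load it read-only under TRYIMPORT_NAME, generate its
 * current config (including spares and l2cache), then unload it.
 */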
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
nvlist_t *config = NULL;
char *poolname, *cachefile;
spa_t *spa;
uint64_t state;
int error;
zpool_load_policy_t policy;
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
return (NULL);
if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
return (NULL);
/*
* Create and initialize the spa structure.
*/
mutex_enter(&spa_namespace_lock);
spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
spa_activate(spa, SPA_MODE_READ);
/*
* Rewind pool if a max txg was provided.
*/
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_txg != UINT64_MAX) {
spa->spa_load_max_txg = policy.zlp_txg;
spa->spa_extreme_rewind = B_TRUE;
zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
poolname, (longlong_t)policy.zlp_txg);
} else {
zfs_dbgmsg("spa_tryimport: importing %s", poolname);
}
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
== 0) {
zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
} else {
spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
}
error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
/*
* If 'tryconfig' was at least parsable, return the current config.
*/
if (spa->spa_root_vdev != NULL) {
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
poolname) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
state) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
spa->spa_uberblock.ub_timestamp) == 0);
VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
spa->spa_errata) == 0);
/*
* If the bootfs property exists on this pool then we
* copy it out so that external consumers can tell which
* pools are bootable.
*/
if ((!error || error == EEXIST) && spa->spa_bootfs) {
char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
* We have to play games with the name since the
* pool was opened as TRYIMPORT_NAME.
*/
if (dsl_dsobj_to_dsname(spa_name(spa),
spa->spa_bootfs, tmpname) == 0) {
char *cp;
char *dsname;
dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
cp = strchr(tmpname, '/');
if (cp == NULL) {
(void) strlcpy(dsname, tmpname,
MAXPATHLEN);
} else {
(void) snprintf(dsname, MAXPATHLEN,
"%s/%s", poolname, ++cp);
}
VERIFY(nvlist_add_string(config,
ZPOOL_CONFIG_BOOTFS, dsname) == 0);
kmem_free(dsname, MAXPATHLEN);
}
kmem_free(tmpname, MAXPATHLEN);
}
/*
* Add the list of hot spares and level 2 cache devices.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_add_spares(spa, config);
spa_add_l2cache(spa, config);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (config);
}
/*
* Pool export/destroy
*
* The act of destroying or exporting a pool is very simple. We make sure there
* is no more pending I/O and any references to the pool are gone. Then, we
* update the pool state and sync all the labels to disk, removing the
* configuration from the cache afterwards. If the 'hardforce' flag is set, then
* we don't sync the labels or remove the configuration cache.
*/
static int
spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
boolean_t force, boolean_t hardforce)
{
int error;
spa_t *spa;
if (oldconfig)
*oldconfig = NULL;
if (!(spa_mode_global & SPA_MODE_WRITE))
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_is_exporting) {
/* the pool is being exported by another thread */
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
}
spa->spa_is_exporting = B_TRUE;
/*
* Put a hold on the pool, drop the namespace lock, stop async tasks,
* reacquire the namespace lock, and see if we can export.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
if (spa->spa_zvol_taskq) {
zvol_remove_minors(spa, spa_name(spa), B_TRUE);
taskq_wait(spa->spa_zvol_taskq);
}
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
goto export_spa;
/*
* The pool will be in core if it's openable, in which case we can
* modify its state. Objsets may be open only because they're dirty,
* so we have to force it to sync before checking spa_refcnt.
*/
if (spa->spa_sync_on) {
txg_wait_synced(spa->spa_dsl_pool, 0);
spa_evicting_os_wait(spa);
}
/*
* A pool cannot be exported or destroyed if there are active
* references. If we are resetting a pool, allow references by
* fault injection handlers.
*/
if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
error = SET_ERROR(EBUSY);
goto fail;
}
if (spa->spa_sync_on) {
/*
* A pool cannot be exported if it has an active shared spare.
* This is to prevent other pools from stealing the active spare
* from an exported pool. Such a pool can still be exported if
* the user explicitly forces it.
*/
if (!force && new_state == POOL_STATE_EXPORTED &&
spa_has_active_shared_spare(spa)) {
error = SET_ERROR(EXDEV);
goto fail;
}
/*
* We're about to export or destroy this pool. Make sure
* we stop all initialization and trim activity here before
* we set the spa_final_txg. This will ensure that all
* dirty data resulting from the initialization is
* committed to disk before we unload the pool.
*/
if (spa->spa_root_vdev != NULL) {
vdev_t *rvd = spa->spa_root_vdev;
vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
}
/*
* We want this to be reflected on every label,
* so mark them all dirty. spa_unload() will do the
* final sync that pushes these changes out.
*/
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_state = new_state;
spa->spa_final_txg = spa_last_synced_txg(spa) +
TXG_DEFER_SIZE + 1;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
}
}
export_spa:
if (new_state == POOL_STATE_DESTROYED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
else if (new_state == POOL_STATE_EXPORTED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
if (oldconfig && spa->spa_config)
VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
if (new_state != POOL_STATE_UNINITIALIZED) {
if (!hardforce)
spa_write_cachefile(spa, B_TRUE, B_TRUE);
spa_remove(spa);
} else {
/*
* If spa_remove() is not called for this spa_t and
* there is any possibility that it can be reused,
* we make sure to reset the exporting flag.
*/
spa->spa_is_exporting = B_FALSE;
}
mutex_exit(&spa_namespace_lock);
return (0);
fail:
spa->spa_is_exporting = B_FALSE;
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Destroy a storage pool.
*/
int
spa_destroy(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
B_FALSE, B_FALSE));
}
/*
* Export a storage pool.
*/
int
spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce)
{
return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
force, hardforce));
}
/*
* Similar to spa_export(), this unloads the spa_t without actually removing it
* from the namespace in any way.
*/
int
spa_reset(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
B_FALSE, B_FALSE));
}
/*
* ==========================================================================
* Device manipulation
* ==========================================================================
*/
/*
* This is called as a synctask to increment the draid feature flag
*/
static void
spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int draid = (int)(uintptr_t)arg;
for (int c = 0; c < draid; c++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
}
/*
* Add a device to a storage pool.
*/
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
uint64_t txg, ndraid = 0;
int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *tvd;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
&nspares) != 0)
nspares = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
&nl2cache) != 0)
nl2cache = 0;
if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
if (vd->vdev_children != 0 &&
(error = vdev_create(vd, txg, B_FALSE)) != 0) {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* The virtual dRAID spares must be added after the vdev tree is created
* and the vdev guids are generated. The guid of their associated
* dRAID is stored in the config and used when opening the spare.
*/
if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
rvd->vdev_children)) == 0) {
if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
nspares = 0;
} else {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* We must validate the spares and l2cache devices after checking the
* children. Otherwise, vdev_inuse() will blindly overwrite the spare.
*/
if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, vd, txg, error));
/*
* If we are in the middle of a device removal, we can only add
* devices which match the existing devices in the pool.
* If we are in the middle of a removal, or have some indirect
* vdevs, we cannot add raidz or dRAID top levels.
*/
if (spa->spa_vdev_removal != NULL ||
spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
if (spa->spa_vdev_removal != NULL &&
tvd->vdev_ashift != spa->spa_max_ashift) {
return (spa_vdev_exit(spa, vd, txg, EINVAL));
}
/* Fail if top level vdev is raidz or a dRAID */
if (vdev_get_nparity(tvd) != 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
/*
* Need the top level mirror to be
* a mirror of leaf vdevs only
*/
if (tvd->vdev_ops == &vdev_mirror_ops) {
for (uint64_t cid = 0;
cid < tvd->vdev_children; cid++) {
vdev_t *cvd = tvd->vdev_child[cid];
if (!cvd->vdev_ops->vdev_op_leaf) {
return (spa_vdev_exit(spa, vd,
txg, EINVAL));
}
}
}
}
}
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
vdev_remove_child(vd, tvd);
tvd->vdev_id = rvd->vdev_children;
vdev_add_child(rvd, tvd);
vdev_config_dirty(tvd);
}
if (nspares != 0) {
spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
ZPOOL_CONFIG_SPARES);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nl2cache != 0) {
spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
ZPOOL_CONFIG_L2CACHE);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* We can't increment a feature while holding spa_vdev so we
* have to do it in a synctask.
*/
if (ndraid != 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
(void *)(uintptr_t)ndraid, tx);
dmu_tx_commit(tx);
}
/*
* We have to be careful when adding new vdevs to an existing pool.
* If other threads start allocating from these vdevs before we
* sync the config cache, and we lose power, then upon reboot we may
* fail to open the pool because there are DVAs that the config cache
* can't translate. Therefore, we first add the vdevs without
* initializing metaslabs; sync the config cache (via spa_vdev_exit());
* and then let spa_config_update() initialize the new metaslabs.
*
* spa_load() checks for added-but-not-initialized vdevs, so that
* if we lose power at any point in this sequence, the remaining
* steps will be completed the next time we load the pool.
*/
(void) spa_vdev_exit(spa, vd, txg, 0);
mutex_enter(&spa_namespace_lock);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Attach a device to a mirror. The arguments are the path to any device
* in the mirror, and the nvroot for the new device. If the path specifies
* a device that is not mirrored, we automatically insert the mirror vdev.
*
* If 'replacing' is specified, the new device is intended to replace the
* existing device; in this case the two devices are made into their own
* mirror using the 'replacing' vdev, which is functionally identical to
* the mirror vdev (it actually reuses all the same ops) but has a few
* extra rules: you can't attach to it after it's been created, and upon
* completion of resilvering, the first disk (the one being replaced)
* is automatically detached.
*
* If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
* should be performed instead of traditional healing reconstruction. From
* an administrator's perspective these are both resilver operations.
*/
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
int rebuild)
{
uint64_t txg, dtl_max_txg;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
vdev_ops_t *pvops;
char *oldvdpath, *newvdpath;
int newvd_isspare;
int error;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (rebuild) {
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
if (dsl_scan_resilvering(spa_get_dsl(spa)))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_RESILVER_IN_PROGRESS));
} else {
if (vdev_rebuild_active(rvd))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_REBUILD_IN_PROGRESS));
}
if (spa->spa_vdev_removal != NULL)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
if (oldvd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!oldvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = oldvd->vdev_parent;
if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
VDEV_ALLOC_ATTACH)) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
if (newrootvd->vdev_children != 1)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
newvd = newrootvd->vdev_child[0];
if (!newvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
return (spa_vdev_exit(spa, newrootvd, txg, error));
/*
* Spares can't replace logs
*/
if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* A dRAID spare can only replace a child of its parent dRAID vdev.
*/
if (newvd->vdev_ops == &vdev_draid_spare_ops &&
oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (rebuild) {
/*
* For rebuilds, the top vdev must support reconstruction
* using only space maps. This means the only allowable
* vdev types are the root vdev, a mirror, or dRAID.
*/
tvd = pvd;
if (pvd->vdev_top != NULL)
tvd = pvd->vdev_top;
if (tvd->vdev_ops != &vdev_mirror_ops &&
tvd->vdev_ops != &vdev_root_ops &&
tvd->vdev_ops != &vdev_draid_ops) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
}
if (!replacing) {
/*
* For attach, the only allowable parent is a mirror or the root
* vdev.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
pvops = &vdev_mirror_ops;
} else {
/*
* Active hot spares can only be replaced by inactive hot
* spares.
*/
if (pvd->vdev_ops == &vdev_spare_ops &&
oldvd->vdev_isspare &&
!spa_has_spare(spa, newvd->vdev_guid))
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If the source is a hot spare, and the parent isn't already a
* spare, then we want to create a new hot spare. Otherwise, we
* want to create a replacing vdev. The user is not allowed to
* attach to a spared vdev child unless the 'isspare' state is
* the same (spare replaces spare, non-spare replaces
* non-spare).
*/
if (pvd->vdev_ops == &vdev_replacing_ops &&
spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
} else if (pvd->vdev_ops == &vdev_spare_ops &&
newvd->vdev_isspare != oldvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (newvd->vdev_isspare)
pvops = &vdev_spare_ops;
else
pvops = &vdev_replacing_ops;
}
/*
* Make sure the new device is big enough.
*/
if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
/*
* The new device cannot have a higher alignment requirement
* than the top-level vdev.
*/
if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If this is an in-place replacement, update oldvd's path and devid
* to make it distinguishable from newvd, and unopenable from now on.
*/
if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
spa_strfree(oldvd->vdev_path);
oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
KM_SLEEP);
(void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
"%s/%s", newvd->vdev_path, "old");
if (oldvd->vdev_devid != NULL) {
spa_strfree(oldvd->vdev_devid);
oldvd->vdev_devid = NULL;
}
}
/*
* If the parent is not a mirror, or if we're replacing, insert the new
* mirror/replacing/spare vdev above oldvd.
*/
if (pvd->vdev_ops != pvops)
pvd = vdev_add_parent(oldvd, pvops);
ASSERT(pvd->vdev_top->vdev_parent == rvd);
ASSERT(pvd->vdev_ops == pvops);
ASSERT(oldvd->vdev_parent == pvd);
/*
* Extract the new device from its root and add it to pvd.
*/
vdev_remove_child(newrootvd, newvd);
newvd->vdev_id = pvd->vdev_children;
newvd->vdev_crtxg = oldvd->vdev_crtxg;
vdev_add_child(pvd, newvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(pvd);
tvd = newvd->vdev_top;
ASSERT(pvd->vdev_top == tvd);
ASSERT(tvd->vdev_parent == rvd);
vdev_config_dirty(tvd);
/*
* Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
* for any dmu_sync-ed blocks. It will propagate upward when
* spa_vdev_exit() calls vdev_dtl_reassess().
*/
dtl_max_txg = txg + TXG_CONCURRENT_STATES;
vdev_dtl_dirty(newvd, DTL_MISSING,
TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
}
oldvdpath = spa_strdup(oldvd->vdev_path);
newvdpath = spa_strdup(newvd->vdev_path);
newvd_isspare = newvd->vdev_isspare;
/*
* Mark newvd's DTL dirty in this txg.
*/
vdev_dirty(tvd, VDD_DTL, newvd, txg);
/*
* Schedule the resilver or rebuild to restart in the future. We do
* this to ensure that dmu_sync-ed blocks have been stitched into the
* respective datasets.
*/
if (rebuild) {
newvd->vdev_rebuild_txg = txg;
vdev_rebuild(tvd);
} else {
newvd->vdev_resilver_txg = txg;
if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
vdev_defer_resilver(newvd);
} else {
dsl_scan_restart_resilver(spa->spa_dsl_pool,
dtl_max_txg);
}
}
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
/*
* Commit the config
*/
(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
spa_history_log_internal(spa, "vdev attach", NULL,
"%s vdev=%s %s vdev=%s",
replacing && newvd_isspare ? "spare in" :
replacing ? "replace" : "attach", newvdpath,
replacing ? "for" : "to", oldvdpath);
spa_strfree(oldvdpath);
spa_strfree(newvdpath);
return (0);
}
/*
* Detach a device from a mirror or replacing vdev.
*
* If 'replace_done' is specified, only detach if the parent
* is a replacing vdev.
*/
int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
uint64_t txg;
int error;
vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
uint64_t unspare_guid = 0;
char *vdpath;
ASSERT(spa_writeable(spa));
txg = spa_vdev_detach_enter(spa, guid);
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
/*
* Besides being called directly from the userland through the
* ioctl interface, spa_vdev_detach() can be potentially called
* at the end of spa_vdev_resilver_done().
*
* In the regular case, when we have a checkpoint this shouldn't
* happen as we never empty the DTLs of a vdev during the scrub
* [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
* should never get here when we have a checkpoint.
*
* That said, even in a case when we checkpoint the pool exactly
* as spa_vdev_resilver_done() calls this function everything
* should be fine as the resilver will return right away.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (vd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = vd->vdev_parent;
/*
* If the parent/child relationship is not as expected, don't do it.
* Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
* vdev that's replacing B with C. The user's intent in replacing
* is to go from M(A,B) to M(A,C). If the user decides to cancel
* the replace by detaching C, the expected behavior is to end up
* M(A,B). But suppose that right after deciding to detach C,
* the replacement of B completes. We would have M(A,C), and then
* ask to detach C, which would leave us with just A -- not what
* the user wanted. To prevent this, we make sure that the
* parent/child relationship hasn't changed -- in this example,
* that C's parent is still the replacing vdev R.
*/
if (pvd->vdev_guid != pguid && pguid != 0)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
/*
* Only 'replacing' or 'spare' vdevs can be replaced.
*/
if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
spa_version(spa) >= SPA_VERSION_SPARES);
/*
* Only mirror, replacing, and spare vdevs support detach.
*/
if (pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
/*
* If this device has the only valid copy of some data,
* we cannot safely detach it.
*/
if (vdev_dtl_required(vd))
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
ASSERT(pvd->vdev_children >= 2);
/*
* If we are detaching the second disk from a replacing vdev, then
* check to see if we changed the original vdev's path to have "/old"
* at the end in spa_vdev_attach(). If so, undo that change now.
*/
if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
vd->vdev_path != NULL) {
size_t len = strlen(vd->vdev_path);
for (int c = 0; c < pvd->vdev_children; c++) {
cvd = pvd->vdev_child[c];
if (cvd == vd || cvd->vdev_path == NULL)
continue;
if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
strcmp(cvd->vdev_path + len, "/old") == 0) {
spa_strfree(cvd->vdev_path);
cvd->vdev_path = spa_strdup(vd->vdev_path);
break;
}
}
}
/*
* If we are detaching the original disk from a normal spare, then it
* implies that the spare should become a real disk, and be removed
* from the active spare list for the pool. dRAID spares on the
* other hand are coupled to the pool and thus should never be removed
* from the spares list.
*/
if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
if (last_cvd->vdev_isspare &&
last_cvd->vdev_ops != &vdev_draid_spare_ops) {
unspare = B_TRUE;
}
}
/*
* Erase the disk labels so the disk can be used for other things.
* This must be done after all other error cases are handled,
* but before we disembowel vd (so we can still do I/O to it).
* But if we can't do it, don't treat the error as fatal --
* it may be that the unwritability of the disk is the reason
* it's being detached!
*/
error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
/*
* Remove vd from its parent and compact the parent's children.
*/
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
/*
* Remember one of the remaining children so we can get tvd below.
*/
cvd = pvd->vdev_child[pvd->vdev_children - 1];
/*
* If we need to remove the remaining child from the list of hot spares,
* do it now, marking the vdev as no longer a spare in the process.
* We must do this before vdev_remove_parent(), because that can
* change the GUID if it creates a new toplevel GUID. For a similar
* reason, we must remove the spare now, in the same txg as the detach;
* otherwise someone could attach a new sibling, change the GUID, and
* the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
*/
if (unspare) {
ASSERT(cvd->vdev_isspare);
spa_spare_remove(cvd);
unspare_guid = cvd->vdev_guid;
(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
cvd->vdev_unspare = B_TRUE;
}
/*
* If the parent mirror/replacing vdev only has one child,
* the parent is no longer needed. Remove it from the tree.
*/
if (pvd->vdev_children == 1) {
if (pvd->vdev_ops == &vdev_spare_ops)
cvd->vdev_unspare = B_FALSE;
vdev_remove_parent(cvd);
}
/*
* We don't set tvd until now because the parent we just removed
* may have been the previous top-level vdev.
*/
tvd = cvd->vdev_top;
ASSERT(tvd->vdev_parent == rvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(cvd);
/*
* If the 'autoexpand' property is set on the pool then automatically
* try to expand the size of the pool. For example if the device we
* just detached was smaller than the others, it may be possible to
* add metaslabs (i.e. grow the pool). We need to reopen the vdev
* first so that we can obtain the updated sizes of the leaf vdevs.
*/
if (spa->spa_autoexpand) {
vdev_reopen(tvd);
vdev_expand(tvd, txg);
}
vdev_config_dirty(tvd);
/*
* Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
* vd->vdev_detached is set and free vd's DTL object in syncing context.
* But first make sure we're not on any *other* txg's DTL list, to
* prevent vd from being accessed after it's freed.
*/
vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
for (int t = 0; t < TXG_SIZE; t++)
(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
vd->vdev_detached = B_TRUE;
vdev_dirty(tvd, VDD_DTL, vd, txg);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
spa_notify_waiters(spa);
/* hang on to the spa before we release the lock */
spa_open_ref(spa, FTAG);
error = spa_vdev_exit(spa, vd, txg, 0);
spa_history_log_internal(spa, "detach", NULL,
"vdev=%s", vdpath);
spa_strfree(vdpath);
/*
* If this was the removal of the original device in a hot spare vdev,
* then we want to go through and remove the device from the hot spare
* list of every other pool.
*/
if (unspare) {
spa_t *altspa = NULL;
mutex_enter(&spa_namespace_lock);
while ((altspa = spa_next(altspa)) != NULL) {
if (altspa->spa_state != POOL_STATE_ACTIVE ||
altspa == spa)
continue;
spa_open_ref(altspa, FTAG);
mutex_exit(&spa_namespace_lock);
(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
mutex_enter(&spa_namespace_lock);
spa_close(altspa, FTAG);
}
mutex_exit(&spa_namespace_lock);
/* search the rest of the vdevs for spares to remove */
spa_vdev_resilver_done(spa);
}
/* all done with the spa; OK to release */
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
mutex_exit(&spa_namespace_lock);
return (error);
}
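/*
 * Start, cancel, or suspend initialization of a single leaf vdev
 * identified by guid. Vdevs whose initialization is stopped are
 * appended to vd_list so the caller can wait for their threads.
 */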
static int
spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
}
mutex_enter(&vd->vdev_initialize_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate an initialize action we check to see
* if the vdev_initialize_thread is NULL. We do this instead
* of using the vdev_initialize_state since there might be
* a previous initialization process which has completed but
* whose thread has not yet exited.
*/
if (cmd_type == POOL_INITIALIZE_START &&
(vd->vdev_initialize_thread != NULL ||
vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
(vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_INITIALIZE_START:
vdev_initialize(vd);
break;
case POOL_INITIALIZE_CANCEL:
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
break;
case POOL_INITIALIZE_SUSPEND:
vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_initialize_lock);
return (0);
}
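/*
 * Start, cancel, or suspend manual initialization for the requested
 * vdevs. Per-vdev failures are recorded in 'vdev_errlist' and the
 * number of failing vdevs is returned.
 */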
int
spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping initialization. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the initializing operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
&vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all initialize threads to stop. */
vdev_initialize_stop_wait(spa, &vd_list);
/* Sync out the initializing state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
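/*
 * Start, cancel, or suspend a manual TRIM of a single leaf vdev
 * identified by guid. Vdevs whose TRIM is stopped are appended to
 * vd_list so the caller can wait for their threads.
 */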
static int
spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
} else if (!vd->vdev_has_trim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
} else if (secure && !vd->vdev_has_securetrim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
mutex_enter(&vd->vdev_trim_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate a TRIM action we check to see if the
* vdev_trim_thread is NULL. We do this instead of using the
* vdev_trim_state since there might be a previous TRIM process
* which has completed but whose thread has not yet exited.
*/
if (cmd_type == POOL_TRIM_START &&
(vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_TRIM_CANCEL &&
(vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_TRIM_SUSPEND &&
vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_TRIM_START:
vdev_trim(vd, rate, partial, secure);
break;
case POOL_TRIM_CANCEL:
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
break;
case POOL_TRIM_SUSPEND:
vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_trim_lock);
return (0);
}
/*
* Initiates a manual TRIM for the requested vdevs. This kicks off individual
* TRIM threads for each child vdev. These threads pass over all of the free
* space in the vdev's metaslabs and issue TRIM commands for that space.
*/
int
spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping TRIM. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the TRIM operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
rate, partial, secure, &vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all TRIM threads to stop. */
vdev_trim_stop_wait(spa, &vd_list);
/* Sync out the TRIM state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
/*
* Split a set of devices from their mirrors, and create a new pool from them.
*/
int
spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp)
{
int error = 0;
uint64_t txg, *glist;
spa_t *newspa;
uint_t c, children, lastlog;
nvlist_t **child, *nvl, *tmp;
dmu_tx_t *tx;
char *altroot = NULL;
vdev_t *rvd, **vml = NULL; /* vdev modify list */
boolean_t activate_slog;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* clear the log and flush everything up to now */
activate_slog = spa_passivate_log(spa);
(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
error = spa_reset_logs(spa);
txg = spa_vdev_config_enter(spa);
if (activate_slog)
spa_activate_log(spa);
if (error != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
/* check new spa name before going any further */
if (spa_lookup(newname) != NULL)
return (spa_vdev_exit(spa, NULL, txg, EEXIST));
/*
* scan through all the children to ensure they're all mirrors
*/
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* first, check to ensure we've got the right child count */
rvd = spa->spa_root_vdev;
lastlog = 0;
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
/* don't count the holes & logs as children */
if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
!vdev_is_concrete(vd))) {
if (lastlog == 0)
lastlog = c;
continue;
}
lastlog = 0;
}
if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* next, ensure no spare or cache devices are part of the split */
if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
/* then, loop over each vdev and validate it */
for (c = 0; c < children; c++) {
uint64_t is_hole = 0;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole != 0) {
if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
continue;
} else {
error = SET_ERROR(EINVAL);
break;
}
}
/* deal with indirect vdevs */
if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
&vdev_indirect_ops)
continue;
/* which disk is going to be split? */
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
&glist[c]) != 0) {
error = SET_ERROR(EINVAL);
break;
}
/* look it up in the spa */
vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
if (vml[c] == NULL) {
error = SET_ERROR(ENODEV);
break;
}
/* make sure there's nothing stopping the split */
if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
vml[c]->vdev_islog ||
!vdev_is_concrete(vml[c]) ||
vml[c]->vdev_isspare ||
vml[c]->vdev_isl2cache ||
!vdev_writeable(vml[c]) ||
vml[c]->vdev_children != 0 ||
vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
error = SET_ERROR(EINVAL);
break;
}
if (vdev_dtl_required(vml[c]) ||
vdev_resilver_needed(vml[c], NULL, NULL)) {
error = SET_ERROR(EBUSY);
break;
}
/* we need certain info from the top level */
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
vml[c]->vdev_top->vdev_ms_array) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
vml[c]->vdev_top->vdev_ms_shift) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
vml[c]->vdev_top->vdev_asize) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
vml[c]->vdev_top->vdev_ashift) == 0);
/* transfer per-vdev ZAPs */
ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_TOP_ZAP,
vml[c]->vdev_parent->vdev_top_zap));
}
if (error != 0) {
kmem_free(vml, children * sizeof (vdev_t *));
kmem_free(glist, children * sizeof (uint64_t));
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* stop writers from using the disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_TRUE;
}
vdev_reopen(spa->spa_root_vdev);
/*
* Temporarily record the splitting vdevs in the spa config. This
* will disappear once the config is regenerated.
*/
VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
glist, children) == 0);
kmem_free(glist, children * sizeof (uint64_t));
mutex_enter(&spa->spa_props_lock);
VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
nvl) == 0);
mutex_exit(&spa->spa_props_lock);
spa->spa_config_splitting = nvl;
vdev_config_dirty(spa->spa_root_vdev);
/* configure and create the new pool */
VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa_version(spa)) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
spa->spa_config_txg) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
spa_generate_guid(NULL)) == 0);
VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
/* add the new pool to the namespace */
newspa = spa_add(newname, config, altroot);
newspa->spa_avz_action = AVZ_ACTION_REBUILD;
newspa->spa_config_txg = spa->spa_config_txg;
spa_set_log_state(newspa, SPA_LOG_CLEAR);
/* release the spa config lock, retaining the namespace lock */
spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 1);
spa_activate(newspa, spa_mode_global);
spa_async_suspend(newspa);
/*
* Temporarily stop the initializing and TRIM activity. We set the
* state to ACTIVE so that we know to resume initializing or TRIM
* once the split has completed.
*/
list_t vd_initialize_list;
list_create(&vd_initialize_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
list_t vd_trim_list;
list_create(&vd_trim_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
mutex_enter(&vml[c]->vdev_initialize_lock);
vdev_initialize_stop(vml[c],
VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
mutex_exit(&vml[c]->vdev_initialize_lock);
mutex_enter(&vml[c]->vdev_trim_lock);
vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
mutex_exit(&vml[c]->vdev_trim_lock);
}
}
vdev_initialize_stop_wait(spa, &vd_initialize_list);
vdev_trim_stop_wait(spa, &vd_trim_list);
list_destroy(&vd_initialize_list);
list_destroy(&vd_trim_list);
newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
newspa->spa_is_splitting = B_TRUE;
/* create the new pool from the disks of the original pool */
error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
if (error)
goto out;
/* if that worked, generate a real config for the new pool */
if (newspa->spa_root_vdev != NULL) {
VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
B_TRUE));
}
/* set the props */
if (props != NULL) {
spa_configfile_set(newspa, props, B_FALSE);
error = spa_prop_set(newspa, props);
if (error)
goto out;
}
/* flush everything */
txg = spa_vdev_config_enter(newspa);
vdev_config_dirty(newspa->spa_root_vdev);
(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 2);
spa_async_resume(newspa);
/* finally, update the original pool's config */
txg = spa_vdev_config_enter(spa);
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0)
dmu_tx_abort(tx);
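/*
* If the tx could not be assigned the split still proceeds below, but
* the per-vdev "detach" history entries and the final commit are
* skipped (error != 0).
*/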
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
vdev_t *tvd = vml[c]->vdev_top;
/*
* Need to be sure the detachable VDEV is not
* on any *other* txg's DTL list to prevent it
* from being accessed after it's freed.
*/
for (int t = 0; t < TXG_SIZE; t++) {
(void) txg_list_remove_this(
&tvd->vdev_dtl_list, vml[c], t);
}
vdev_split(vml[c]);
if (error == 0)
spa_history_log_internal(spa, "detach", tx,
"vdev=%s", vml[c]->vdev_path);
vdev_free(vml[c]);
}
}
spa->spa_avz_action = AVZ_ACTION_REBUILD;
vdev_config_dirty(spa->spa_root_vdev);
spa->spa_config_splitting = NULL;
nvlist_free(nvl);
if (error == 0)
dmu_tx_commit(tx);
(void) spa_vdev_exit(spa, NULL, txg, 0);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 3);
/* split is complete; log a history record */
spa_history_log_internal(newspa, "split", NULL,
"from pool %s", spa_name(spa));
newspa->spa_is_splitting = B_FALSE;
kmem_free(vml, children * sizeof (vdev_t *));
/* if we're not going to mount the filesystems in userland, export */
if (exp)
error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
B_FALSE, B_FALSE);
return (error);
out:
spa_unload(newspa);
spa_deactivate(newspa);
spa_remove(newspa);
txg = spa_vdev_config_enter(spa);
/* re-online all offlined disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_FALSE;
}
/* restart initializing or trimming disks as necessary */
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
vdev_reopen(spa->spa_root_vdev);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
(void) spa_vdev_exit(spa, NULL, txg, error);
kmem_free(vml, children * sizeof (vdev_t *));
return (error);
}
/*
* Find any device that's done replacing, or a vdev marked 'unspare' that's
* currently spared, so we can detach it.
*/
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
vdev_t *newvd, *oldvd;
for (int c = 0; c < vd->vdev_children; c++) {
oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
if (oldvd != NULL)
return (oldvd);
}
/*
* Check for a completed replacement. We always consider the first
* vdev in the list to be the oldest vdev, and the last one to be
* the newest (see spa_vdev_attach() for how that works). In
* the case where the newest vdev is faulted, we will not automatically
* remove it after a resilver completes. This is OK as it will require
* user intervention to determine which disk the admin wishes to keep.
*/
if (vd->vdev_ops == &vdev_replacing_ops) {
ASSERT(vd->vdev_children > 1);
newvd = vd->vdev_child[vd->vdev_children - 1];
oldvd = vd->vdev_child[0];
if (vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
}
/*
* Check for a completed resilver with the 'unspare' flag set.
* Also potentially update faulted state.
*/
if (vd->vdev_ops == &vdev_spare_ops) {
vdev_t *first = vd->vdev_child[0];
vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
if (last->vdev_unspare) {
oldvd = first;
newvd = last;
} else if (first->vdev_unspare) {
oldvd = last;
newvd = first;
} else {
oldvd = NULL;
}
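/*
* oldvd is the child that the caller will detach and newvd is the one
* that remains; if neither end of the spare vdev carries the 'unspare'
* flag there is nothing to detach here.
*/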
if (oldvd != NULL &&
vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
vdev_propagate_state(vd);
/*
* If there are more than two spares attached to a disk,
* and those spares are not required, then we want to
* attempt to free them up now so that they can be used
* by other pools. Once we're back down to a single
* disk+spare, we stop removing them.
*/
if (vd->vdev_children > 2) {
newvd = vd->vdev_child[1];
if (newvd->vdev_isspare && last->vdev_isspare &&
vdev_dtl_empty(last, DTL_MISSING) &&
vdev_dtl_empty(last, DTL_OUTAGE) &&
!vdev_dtl_required(newvd))
return (newvd);
}
}
return (NULL);
}
static void
spa_vdev_resilver_done(spa_t *spa)
{
vdev_t *vd, *pvd, *ppvd;
uint64_t guid, sguid, pguid, ppguid;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
pvd = vd->vdev_parent;
ppvd = pvd->vdev_parent;
guid = vd->vdev_guid;
pguid = pvd->vdev_guid;
ppguid = ppvd->vdev_guid;
sguid = 0;
/*
* If we have just finished replacing a hot spared device, then
* we need to detach the parent's first child (the original hot
* spare) as well.
*/
if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
ppvd->vdev_children == 2) {
ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
sguid = ppvd->vdev_child[1]->vdev_guid;
}
ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
return;
if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
return;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
}
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* If a detach was not performed above, replace waiters will not have
* been notified. In that case we must do so now.
*/
spa_notify_waiters(spa);
}
/*
* Update the stored path or FRU for this vdev.
*/
static int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
boolean_t ispath)
{
vdev_t *vd;
boolean_t sync = B_FALSE;
ASSERT(spa_writeable(spa));
spa_vdev_state_enter(spa, SCL_ALL);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENOENT));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
if (ispath) {
if (strcmp(value, vd->vdev_path) != 0) {
spa_strfree(vd->vdev_path);
vd->vdev_path = spa_strdup(value);
sync = B_TRUE;
}
} else {
if (vd->vdev_fru == NULL) {
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
} else if (strcmp(value, vd->vdev_fru) != 0) {
spa_strfree(vd->vdev_fru);
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
}
}
return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
}
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
}
int
spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
{
return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
}
/*
* ==========================================================================
* SPA Scanning
* ==========================================================================
*/
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
}
int
spa_scan_stop(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scan_cancel(spa->spa_dsl_pool));
}
int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
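/* An explicit resilver request requires the resilver_defer feature. */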
if (func == POOL_SCAN_RESILVER &&
!spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a
* writeable leaf device, we have nothing to do.
*/
if (func == POOL_SCAN_RESILVER &&
!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
return (0);
}
return (dsl_scan(spa->spa_dsl_pool, func));
}
/*
* ==========================================================================
* SPA async task processing
* ==========================================================================
*/
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_remove_wanted) {
vd->vdev_remove_wanted = B_FALSE;
vd->vdev_delayed_close = B_FALSE;
vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
/*
* We want to clear the stats, but we don't want to do a full
* vdev_clear() as that will cause us to throw away
* degraded/faulted state as well as attempt to reopen the
* device, all of which is a waste.
*/
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vdev_state_dirty(vd->vdev_top);
/* Tell userspace that the vdev is gone. */
zfs_post_remove(spa, vd);
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_remove(spa, vd->vdev_child[c]);
}
static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_probe_wanted) {
vd->vdev_probe_wanted = B_FALSE;
vdev_reopen(vd); /* vdev_open() does the actual probe */
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_probe(spa, vd->vdev_child[c]);
}
static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
if (!spa->spa_autoexpand)
return;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
spa_async_autoexpand(spa, cvd);
}
if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
return;
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
}
static void
spa_async_thread(void *arg)
{
spa_t *spa = (spa_t *)arg;
dsl_pool_t *dp = spa->spa_dsl_pool;
int tasks;
ASSERT(spa->spa_sync_on);
mutex_enter(&spa->spa_async_lock);
tasks = spa->spa_async_tasks;
spa->spa_async_tasks = 0;
mutex_exit(&spa->spa_async_lock);
/*
* See if the config needs to be updated.
*/
if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
uint64_t old_space, new_space;
mutex_enter(&spa_namespace_lock);
old_space = metaslab_class_get_space(spa_normal_class(spa));
old_space += metaslab_class_get_space(spa_special_class(spa));
old_space += metaslab_class_get_space(spa_dedup_class(spa));
old_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
new_space = metaslab_class_get_space(spa_normal_class(spa));
new_space += metaslab_class_get_space(spa_special_class(spa));
new_space += metaslab_class_get_space(spa_dedup_class(spa));
new_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
mutex_exit(&spa_namespace_lock);
/*
* If the pool grew as a result of the config update,
* then log an internal history event.
*/
if (new_space != old_space) {
spa_history_log_internal(spa, "vdev online", NULL,
"pool '%s' size: %llu(+%llu)",
spa_name(spa), (u_longlong_t)new_space,
(u_longlong_t)(new_space - old_space));
}
}
/*
* See if any devices need to be marked REMOVED.
*/
if (tasks & SPA_ASYNC_REMOVE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_remove(spa, spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_async_autoexpand(spa, spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/*
* See if any devices need to be probed.
*/
if (tasks & SPA_ASYNC_PROBE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_probe(spa, spa->spa_root_vdev);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
/*
* If any devices are done replacing, detach them.
*/
if (tasks & SPA_ASYNC_RESILVER_DONE ||
tasks & SPA_ASYNC_REBUILD_DONE) {
spa_vdev_resilver_done(spa);
}
/*
* Kick off a resilver.
*/
if (tasks & SPA_ASYNC_RESILVER &&
!vdev_rebuild_active(spa->spa_root_vdev) &&
(!dsl_scan_resilvering(dp) ||
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
dsl_scan_restart_resilver(dp, 0);
if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_TRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache whole device TRIM.
*/
if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_l2arc(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache rebuilding.
*/
if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
l2arc_spa_rebuild_start(spa);
spa_config_exit(spa, SCL_L2ARC, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Let the world know that we're done.
*/
mutex_enter(&spa->spa_async_lock);
spa->spa_async_thread = NULL;
cv_broadcast(&spa->spa_async_cv);
mutex_exit(&spa->spa_async_lock);
thread_exit();
}
void
spa_async_suspend(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
spa->spa_async_suspended++;
while (spa->spa_async_thread != NULL)
cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
mutex_exit(&spa->spa_async_lock);
spa_vdev_remove_suspend(spa);
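/*
* Stop the zthr-managed background activities (indirect condense,
* checkpoint discard, livelist delete and condense) as well; they are
* resumed by spa_async_resume().
*/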
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_cancel(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_cancel(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_cancel(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_cancel(ll_condense_thread);
}
void
spa_async_resume(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
ASSERT(spa->spa_async_suspended != 0);
spa->spa_async_suspended--;
mutex_exit(&spa->spa_async_lock);
spa_restart_removal(spa);
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_resume(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_resume(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_resume(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_resume(ll_condense_thread);
}
static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
uint_t non_config_tasks;
uint_t config_task;
boolean_t config_task_suspended;
non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
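/*
* If a config cache write failed recently, treat the CONFIG_UPDATE task
* as suspended until zfs_ccw_retry_interval seconds have elapsed since
* the failure.
*/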
if (spa->spa_ccw_fail_time == 0) {
config_task_suspended = B_FALSE;
} else {
config_task_suspended =
(gethrtime() - spa->spa_ccw_fail_time) <
((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
}
return (non_config_tasks || (config_task && !config_task_suspended));
}
static void
spa_async_dispatch(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
if (spa_async_tasks_pending(spa) &&
!spa->spa_async_suspended &&
spa->spa_async_thread == NULL)
spa->spa_async_thread = thread_create(NULL, 0,
spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&spa->spa_async_lock);
}
void
spa_async_request(spa_t *spa, int task)
{
zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks |= task;
mutex_exit(&spa->spa_async_lock);
}
int
spa_async_tasks(spa_t *spa)
{
return (spa->spa_async_tasks);
}
/*
* ==========================================================================
* SPA syncing routines
* ==========================================================================
*/
static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
bpobj_t *bpo = arg;
bpobj_enqueue(bpo, bp, bp_freed, tx);
return (0);
}
int
bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
}
int
bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
}
static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zio_t *pio = arg;
zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
pio->io_flags));
return (0);
}
static int
bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (spa_free_sync_cb(arg, bp, tx));
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing frees.
*/
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
VERIFY(zio_wait(zio) == 0);
}
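/*
* For example (illustrative only, and assuming a platform with the fbt
* provider), the time spent here can be measured with a DTrace one-liner
* along these lines:
*
*	dtrace -n 'fbt::spa_sync_frees:entry { self->ts = timestamp; }
*	    fbt::spa_sync_frees:return /self->ts/ {
*	        @["ns"] = quantize(timestamp - self->ts); self->ts = 0; }'
*/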
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing deferred frees.
*/
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
/*
* Note:
* If the log space map feature is active, we stop deferring
* frees to the next TXG and therefore running this function
* would be considered a no-op as spa_deferred_bpobj should
* not have any entries.
*
* That said we run this function anyway (instead of returning
* immediately) for the edge-case scenario where we just
* activated the log space map feature in this TXG but we have
* deferred frees from the previous TXG.
*/
zio_t *zio = zio_root(spa, NULL, NULL, 0);
VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
bpobj_spa_free_sync_cb, zio, tx), ==, 0);
VERIFY0(zio_wait(zio));
}
static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
char *packed = NULL;
size_t bufsize;
size_t nvsize = 0;
dmu_buf_t *db;
VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
* information. This avoids the dmu_buf_will_dirty() path and
* saves us a pre-read to get data we don't actually care about.
*/
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
bzero(packed + nvsize, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
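/*
* Record the packed nvlist's size in the object's bonus buffer so that a
* reader knows how many of the written bytes to unpack.
*/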
VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
}
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
const char *config, const char *entry)
{
nvlist_t *nvroot;
nvlist_t **list;
int i;
if (!sav->sav_sync)
return;
/*
* Update the MOS nvlist describing the list of available devices.
* spa_validate_aux() will have already made sure this nvlist is
* valid and the vdevs are labeled appropriately.
*/
if (sav->sav_object == 0) {
sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
sizeof (uint64_t), tx);
VERIFY(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
&sav->sav_object, tx) == 0);
}
VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (sav->sav_count == 0) {
VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
} else {
list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
B_FALSE, VDEV_CONFIG_L2CACHE);
VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
sav->sav_count) == 0);
for (i = 0; i < sav->sav_count; i++)
nvlist_free(list[i]);
kmem_free(list, sav->sav_count * sizeof (void *));
}
spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
nvlist_free(nvroot);
sav->sav_sync = B_FALSE;
}
/*
* Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
* The all-vdev ZAP must be empty.
*/
static void
spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
if (vd->vdev_top_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_top_zap, tx));
}
if (vd->vdev_leaf_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_leaf_zap, tx));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_avz_build(vd->vdev_child[i], avz, tx);
}
}
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
nvlist_t *config;
/*
* If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
* its config may not be dirty but we still need to build per-vdev ZAPs.
* Similarly, if the pool is being assembled (e.g. after a split), we
* need to rebuild the AVZ although the config may not be dirty.
*/
if (list_is_empty(&spa->spa_config_dirty_list) &&
spa->spa_avz_action == AVZ_ACTION_NONE)
return;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
spa->spa_all_vdev_zaps != 0);
if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
/* Make and build the new AVZ */
uint64_t new_avz = zap_create(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
spa_avz_build(spa->spa_root_vdev, new_avz, tx);
/* Diff old AVZ with new one */
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t vdzap = za.za_first_integer;
if (zap_lookup_int(spa->spa_meta_objset, new_avz,
vdzap) == ENOENT) {
/*
* ZAP is listed in old AVZ but not in new one;
* destroy it
*/
VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
tx));
}
}
zap_cursor_fini(&zc);
/* Destroy the old AVZ */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
/* Replace the old AVZ in the dir obj with the new one */
VERIFY0(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
sizeof (new_avz), 1, &new_avz, tx));
spa->spa_all_vdev_zaps = new_avz;
} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
zap_cursor_t zc;
zap_attribute_t za;
/* Walk through the AVZ and destroy all listed ZAPs */
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t zap = za.za_first_integer;
VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
}
zap_cursor_fini(&zc);
/* Destroy and unlink the AVZ itself */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
VERIFY0(zap_remove(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
spa->spa_all_vdev_zaps = 0;
}
if (spa->spa_all_vdev_zaps == 0) {
spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_VDEV_ZAP_MAP, tx);
}
spa->spa_avz_action = AVZ_ACTION_NONE;
/* Create ZAPs for vdevs that don't have them. */
vdev_construct_zaps(spa->spa_root_vdev, tx);
config = spa_config_generate(spa, spa->spa_root_vdev,
dmu_tx_get_txg(tx), B_FALSE);
/*
* If we're upgrading the spa version then make sure that
* the config object gets updated with the correct version.
*/
if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa->spa_uberblock.ub_version);
spa_config_exit(spa, SCL_STATE, FTAG);
nvlist_free(spa->spa_config_syncing);
spa->spa_config_syncing = config;
spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
uint64_t *versionp = arg;
uint64_t version = *versionp;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
/*
* Setting the version is special cased when first creating the pool.
*/
ASSERT(tx->tx_txg != TXG_INITIAL);
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
ASSERT(version >= spa_version(spa));
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_history_log_internal(spa, "set", tx, "version=%lld",
(longlong_t)version);
}
/*
* Set zpool properties.
*/
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvp, elem))) {
uint64_t intval;
char *strval, *fname;
zpool_prop_t prop;
const char *propname;
zprop_type_t proptype;
spa_feature_t fid;
switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
case ZPOOL_PROP_INVAL:
/*
* We checked this earlier in spa_prop_validate().
*/
ASSERT(zpool_prop_feature(nvpair_name(elem)));
fname = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(fname, &fid));
spa_feature_enable(spa, fid, tx);
spa_history_log_internal(spa, "set", tx,
"%s=enabled", nvpair_name(elem));
break;
case ZPOOL_PROP_VERSION:
intval = fnvpair_value_uint64(elem);
/*
* The version is synced separately before other
* properties and should be correct by now.
*/
ASSERT3U(spa_version(spa), >=, intval);
break;
case ZPOOL_PROP_ALTROOT:
/*
* 'altroot' is a non-persistent property. It should
* have been set temporarily at creation or import time.
*/
ASSERT(spa->spa_root != NULL);
break;
case ZPOOL_PROP_READONLY:
case ZPOOL_PROP_CACHEFILE:
/*
* 'readonly' and 'cachefile' are also non-persistent
* properties.
*/
break;
case ZPOOL_PROP_COMMENT:
strval = fnvpair_value_string(elem);
if (spa->spa_comment != NULL)
spa_strfree(spa->spa_comment);
spa->spa_comment = spa_strdup(strval);
/*
* We need to dirty the configuration on all the vdevs
* so that their labels get updated. We also need to
* update the cache file to keep it in sync with the
* MOS version. It's unnecessary to do this for pool
* creation since the vdev's configuration has already
* been dirtied.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
case ZPOOL_PROP_COMPATIBILITY:
strval = fnvpair_value_string(elem);
if (spa->spa_compatibility != NULL)
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = spa_strdup(strval);
/*
* Dirty the configuration on vdevs as above.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
default:
/*
* Set pool property values in the poolprops mos object.
*/
if (spa->spa_pool_props_object == 0) {
spa->spa_pool_props_object =
zap_create_link(mos, DMU_OT_POOL_PROPS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
tx);
}
/* normalize the property name */
propname = zpool_prop_to_name(prop);
proptype = zpool_prop_get_type(prop);
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(zpool_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
8, 1, &intval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%lld", nvpair_name(elem),
(longlong_t)intval);
} else {
ASSERT(0); /* not allowed */
}
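/*
* Update the in-core copies of the properties that the rest of the
* code consults directly.
*/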
switch (prop) {
case ZPOOL_PROP_DELEGATION:
spa->spa_delegation = intval;
break;
case ZPOOL_PROP_BOOTFS:
spa->spa_bootfs = intval;
break;
case ZPOOL_PROP_FAILUREMODE:
spa->spa_failmode = intval;
break;
case ZPOOL_PROP_AUTOTRIM:
spa->spa_autotrim = intval;
spa_async_request(spa,
SPA_ASYNC_AUTOTRIM_RESTART);
break;
case ZPOOL_PROP_AUTOEXPAND:
spa->spa_autoexpand = intval;
if (tx->tx_txg != TXG_INITIAL)
spa_async_request(spa,
SPA_ASYNC_AUTOEXPAND);
break;
case ZPOOL_PROP_MULTIHOST:
spa->spa_multihost = intval;
break;
default:
break;
}
}
}
mutex_exit(&spa->spa_props_lock);
}
/*
* Perform one-time upgrade on-disk changes. spa_version() does not
* reflect the new version this txg, so there must be no changes this
* txg to anything that the upgrade code depends on after it executes.
* Therefore this must be called after dsl_pool_sync() does the sync
* tasks.
*/
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
dsl_pool_create_origin(dp, tx);
/* Keeping the origin open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
dsl_pool_upgrade_clones(dp, tx);
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
dsl_pool_upgrade_dir_clones(dp, tx);
/* Keeping the freedir open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
spa_feature_create_zap_objects(spa, tx);
}
/*
* The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
* when the ability to use lz4 compression for metadata was added.
* Old pools that have this feature enabled must be upgraded to have
* this feature active.
*/
if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
boolean_t lz4_en = spa_feature_is_enabled(spa,
SPA_FEATURE_LZ4_COMPRESS);
boolean_t lz4_ac = spa_feature_is_active(spa,
SPA_FEATURE_LZ4_COMPRESS);
if (lz4_en && !lz4_ac)
spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
}
/*
* If we haven't written the salt, do so now. Note that the
* feature may not be activated yet, but that's fine since
* the presence of this ZAP entry is backwards compatible.
*/
if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT) == ENOENT) {
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes, tx));
}
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
static void
vdev_indirect_state_sync_verify(vdev_t *vd)
{
vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(vim != NULL);
ASSERT(vib != NULL);
}
uint64_t obsolete_sm_object = 0;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
space_map_allocated(vd->vdev_obsolete_sm));
}
ASSERT(vd->vdev_obsolete_segments != NULL);
/*
* Since frees / remaps to an indirect vdev can only
* happen in syncing context, the obsolete segments
* tree must be empty when we start syncing.
*/
ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
}
/*
* Set the top-level vdev's max queue depth. Evaluate each top-level's
* async write queue depth in case it changed. The max queue depth will
* not change in the middle of syncing out this txg.
*/
static void
spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
zfs_vdev_queue_depth_pct / 100;
metaslab_class_t *normal = spa_normal_class(spa);
metaslab_class_t *special = spa_special_class(spa);
metaslab_class_t *dedup = spa_dedup_class(spa);
uint64_t slots_per_allocator = 0;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || !metaslab_group_initialized(mg))
continue;
metaslab_class_t *mc = mg->mg_class;
if (mc != normal && mc != special && mc != dedup)
continue;
/*
* It is safe to do a lock-free check here because only async
* allocations look at mg_max_alloc_queue_depth, and async
* allocations all happen from spa_sync().
*/
for (int i = 0; i < mg->mg_allocators; i++) {
ASSERT0(zfs_refcount_count(
&(mg->mg_allocator[i].mga_alloc_queue_depth)));
}
mg->mg_max_alloc_queue_depth = max_queue_depth;
for (int i = 0; i < mg->mg_allocators; i++) {
mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
zfs_vdev_def_queue_depth;
}
slots_per_allocator += zfs_vdev_def_queue_depth;
}
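/*
* Each eligible top-level vdev contributed zfs_vdev_def_queue_depth
* slots per allocator above; that total becomes the per-allocator
* maximum for the normal, special, and dedup classes.
*/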
for (int i = 0; i < spa->spa_alloc_count; i++) {
ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
mca_alloc_slots));
normal->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
special->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
dedup->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
}
normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
}
static void
spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_indirect_state_sync_verify(vd);
if (vdev_indirect_should_condense(vd)) {
spa_condense_indirect_start_sync(vd, tx);
break;
}
}
}
static void
spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
{
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
uint64_t txg = tx->tx_txg;
bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
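/*
* Run sync passes until the MOS is no longer dirty in this txg; syncing
* can itself dirty new data (e.g. newly freed blocks), so multiple
* passes may be required before we converge.
*/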
do {
int pass = ++spa->spa_sync_pass;
spa_sync_config_object(spa, tx);
spa_sync_aux_dev(spa, &spa->spa_spares, tx,
ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
spa_errlog_sync(spa, txg);
dsl_pool_sync(dp, txg);
if (pass < zfs_sync_pass_deferred_free ||
spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
/*
* If the log space map feature is active we don't
* care about deferred frees and the deferred bpobj
* as the log space map should effectively have the
* same results (i.e. appending only to one object).
*/
spa_sync_frees(spa, free_bpl, tx);
} else {
/*
* We cannot defer frees in pass 1, because
* we sync the deferred frees later in pass 1.
*/
ASSERT3U(pass, >, 1);
bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
&spa->spa_deferred_bpobj, tx);
}
ddt_sync(spa, txg);
dsl_scan_sync(dp, tx);
svr_sync(spa, tx);
spa_sync_upgrades(spa, tx);
spa_flush_metaslabs(spa, tx);
vdev_t *vd = NULL;
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
!= NULL)
vdev_sync(vd, txg);
/*
* Note: We need to check if the MOS is dirty because we could
* have marked the MOS dirty without updating the uberblock
* (e.g. if we have sync tasks but no dirty user data). We need
* to check the uberblock's rootbp because it is updated if we
* have synced out dirty data (though in this case the MOS will
* most likely also be dirty due to second order effects, we
* don't want to rely on that here).
*/
if (pass == 1 &&
spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
!dmu_objset_is_dirty(mos, txg)) {
/*
* Nothing changed on the first pass, therefore this
* TXG is a no-op. Avoid syncing deferred frees, so
* that we can keep this TXG as a no-op.
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
break;
}
spa_sync_deferred_frees(spa, tx);
} while (dmu_objset_is_dirty(mos, txg));
}
/*
* Rewrite the vdev configuration (which includes the uberblock) to
* commit the transaction group.
*
* If there are no dirty vdevs, we sync the uberblock to a few random
* top-level vdevs that are known to be visible in the config cache
* (see spa_vdev_add() for a complete description). If there *are* dirty
* vdevs, sync the uberblock to all vdevs.
*/
static void
spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t txg = tx->tx_txg;
for (;;) {
int error = 0;
/*
* We hold SCL_STATE to prevent vdev open/close/etc.
* while we're attempting to write the vdev labels.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (list_is_empty(&spa->spa_config_dirty_list)) {
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd =
rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 ||
vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, txg);
} else {
error = vdev_config_sync(rvd->vdev_child,
rvd->vdev_children, txg);
}
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_STATE, FTAG);
if (error == 0)
break;
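/*
* The config/uberblock write failed. Suspend the pool and wait for
* I/O to be resumed before retrying.
*/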
zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
zio_resume_wait(spa);
}
}
/*
* Sync the specified transaction group. New blocks may be dirtied as
* part of the process, so we iterate until it converges.
*/
void
spa_sync(spa_t *spa, uint64_t txg)
{
vdev_t *vd = NULL;
VERIFY(spa_writeable(spa));
/*
* Wait for i/os issued in open context that need to complete
* before this txg syncs.
*/
(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/*
* Lock out configuration changes.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa->spa_syncing_txg = txg;
spa->spa_sync_pass = 0;
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* If there are any pending vdev state changes, convert them
* into config changes that go out with this transaction group.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
while (list_head(&spa->spa_state_dirty_list) != NULL) {
/*
* We need the write lock here because, for aux vdevs,
* calling vdev_config_dirty() modifies sav_config.
* This is ugly and will become unnecessary when we
* eliminate the aux vdev wart by integrating all vdevs
* into the root vdev tree.
*/
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
}
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
}
spa_config_exit(spa, SCL_STATE, FTAG);
dsl_pool_t *dp = spa->spa_dsl_pool;
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
spa->spa_sync_starttime = gethrtime();
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
NSEC_TO_TICK(spa->spa_deadman_synctime));
/*
* If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
* set spa_deflate if we have no raid-z vdevs.
*/
if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
vdev_t *rvd = spa->spa_root_vdev;
int i;
for (i = 0; i < rvd->vdev_children; i++) {
vd = rvd->vdev_child[i];
if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
break;
}
if (i == rvd->vdev_children) {
spa->spa_deflate = TRUE;
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx));
}
}
spa_sync_adjust_vdev_max_queue_depth(spa);
spa_sync_condense_indirect(spa, tx);
spa_sync_iterate_to_convergence(spa, tx);
#ifdef ZFS_DEBUG
if (!list_is_empty(&spa->spa_config_dirty_list)) {
/*
* Make sure that the number of ZAPs for all the vdevs matches
* the number of ZAPs in the per-vdev ZAP list. This only gets
* called if the config is dirty; otherwise there may be
* outstanding AVZ operations that weren't completed in
* spa_sync_config_object.
*/
uint64_t all_vdev_zap_entry_count;
ASSERT0(zap_count(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
all_vdev_zap_entry_count);
}
#endif
if (spa->spa_vdev_removal != NULL) {
ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
}
spa_sync_rewrite_vdev_config(spa, tx);
dmu_tx_commit(tx);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = 0;
/*
* Clear the dirty config list.
*/
while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
vdev_config_clean(vd);
/*
* Now that the new config has synced transactionally,
* let it become visible to the config cache.
*/
if (spa->spa_config_syncing != NULL) {
spa_config_set(spa, spa->spa_config_syncing);
spa->spa_config_txg = txg;
spa->spa_config_syncing = NULL;
}
dsl_pool_sync_done(dp, txg);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* Update usable space statistics.
*/
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
!= NULL)
vdev_sync_done(vd, txg);
metaslab_class_evict_old(spa->spa_normal_class, txg);
metaslab_class_evict_old(spa->spa_log_class, txg);
spa_sync_close_syncing_log_sm(spa);
spa_update_dspace(spa);
/*
* It had better be the case that we didn't dirty anything
* since vdev_config_sync().
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
while (zfs_pause_spa_sync)
delay(1);
spa->spa_sync_pass = 0;
/*
* Update the last synced uberblock here. We want to do this at
* the end of spa_sync() so that consumers of spa_last_synced_txg()
* will be guaranteed that all the processing associated with
* that txg has been completed.
*/
spa->spa_ubsync = spa->spa_uberblock;
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_handle_ignored_writes(spa);
/*
* If any async tasks have been requested, kick them off.
*/
spa_async_dispatch(spa);
}
/*
* Sync all pools. We don't want to hold the namespace lock across these
* operations, so we take a reference on the spa_t and drop the lock during the
* sync.
*/
void
spa_sync_allpools(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (spa_state(spa) != POOL_STATE_ACTIVE ||
!spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
}
mutex_exit(&spa_namespace_lock);
}
/*
* ==========================================================================
* Miscellaneous routines
* ==========================================================================
*/
/*
* Remove all pools in the system.
*/
void
spa_evict_all(void)
{
spa_t *spa;
/*
* Remove all cached state. All pools should be closed now,
* so every spa in the AVL tree should be unreferenced.
*/
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(NULL)) != NULL) {
/*
* Stop async tasks. The async thread may need to detach
* a device that's been replaced, which requires grabbing
* spa_namespace_lock, so we must drop it here.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
}
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
vdev_t *vd;
int i;
if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
return (vd);
if (aux) {
for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd = spa->spa_l2cache.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
}
return (NULL);
}
void
spa_upgrade(spa_t *spa, uint64_t version)
{
ASSERT(spa_writeable(spa));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* This should only be called for a non-faulted pool, and since a
* future version would result in an unopenable pool, this shouldn't be
* possible.
*/
ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
}
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
int i;
uint64_t spareguid;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++)
if (sav->sav_vdevs[i]->vdev_guid == guid)
return (B_TRUE);
for (i = 0; i < sav->sav_npending; i++) {
if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
&spareguid) == 0 && spareguid == guid)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Check if a pool has an active shared spare device.
* Note: the reference count of an active spare is 2: once as a spare and
* once as a replacement.
*/
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
int i, refcnt;
uint64_t pool;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++) {
if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
&refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
refcnt > 2)
return (B_TRUE);
}
return (B_FALSE);
}
uint64_t
spa_total_metaslabs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t m = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
m += vd->vdev_ms_count;
}
return (m);
}
/*
* Notify any waiting threads that some activity has switched from being
* in-progress to not-in-progress so that they can wake up and determine
* whether they are finished waiting.
*/
void
spa_notify_waiters(spa_t *spa)
{
/*
* Acquiring spa_activities_lock here prevents the cv_broadcast from
* happening between the waiting thread's check and cv_wait.
*/
mutex_enter(&spa->spa_activities_lock);
cv_broadcast(&spa->spa_activities_cv);
mutex_exit(&spa->spa_activities_lock);
}
/*
* Notify any waiting threads that the pool is exporting, and then block until
* they are finished using the spa_t.
*/
void
spa_wake_waiters(spa_t *spa)
{
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters_cancel = B_TRUE;
cv_broadcast(&spa->spa_activities_cv);
while (spa->spa_waiters != 0)
cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
spa->spa_waiters_cancel = B_FALSE;
mutex_exit(&spa->spa_activities_lock);
}
/* Whether the vdev or any of its descendants are being initialized/trimmed. */
static boolean_t
spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
&vd->vdev_initialize_lock : &vd->vdev_trim_lock;
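/*
* Per the lock ordering described in the "Locking for waiting threads"
* comment below, temporarily drop spa_activities_lock so the
* activity-specific lock can be taken first, then reacquire it before
* checking the state.
*/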
mutex_exit(&spa->spa_activities_lock);
mutex_enter(lock);
mutex_enter(&spa->spa_activities_lock);
boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
(vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
(vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
mutex_exit(lock);
if (in_progress)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
activity))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* If use_guid is true, this checks whether the vdev specified by guid is
* being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
* is being initialized/trimmed. The caller must hold the config lock and
* spa_activities_lock.
*/
static int
spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
zpool_wait_activity_t activity, boolean_t *in_progress)
{
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
vdev_t *vd;
if (use_guid) {
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (EINVAL);
}
} else {
vd = spa->spa_root_vdev;
}
*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (0);
}
/*
* Locking for waiting threads
* ---------------------------
*
* Waiting threads need a way to check whether a given activity is in progress,
* and then, if it is, wait for it to complete. Each activity will have some
* in-memory representation of the relevant on-disk state which can be used to
* determine whether or not the activity is in progress. The in-memory state and
* the locking used to protect it will be different for each activity, and may
* not be suitable for use with a cvar (e.g., some state is protected by the
* config lock). To allow waiting threads to wait without any races, another
* lock, spa_activities_lock, is used.
*
* When the state is checked, both the activity-specific lock (if there is one)
* and spa_activities_lock are held. In some cases, the activity-specific lock
* is acquired explicitly (e.g. the config lock). In others, the locking is
* internal to some check (e.g. bpobj_is_empty). After checking, the waiting
* thread releases the activity-specific lock and, if the activity is in
* progress, then cv_waits using spa_activities_lock.
*
* The waiting thread is woken when another thread, one completing some
* activity, updates the state of the activity and then calls
* spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
* needs to hold its activity-specific lock when updating the state, and this
* lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
*
* Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
* and because it is held when the waiting thread checks the state of the
* activity, it can never be the case that the completing thread both updates
* the activity state and cv_broadcasts in between the waiting thread's check
* and cv_wait. Thus, a waiting thread can never miss a wakeup.
*
* In order to prevent deadlock, when the waiting thread does its check, in some
* cases it will temporarily drop spa_activities_lock in order to acquire the
* activity-specific lock. The order in which spa_activities_lock and the
* activity specific lock are acquired in the waiting thread is determined by
* the order in which they are acquired in the completing thread; if the
* completing thread calls spa_notify_waiters with the activity-specific lock
* held, then the waiting thread must also acquire the activity-specific lock
* first.
*/
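/*
* Illustrative sketch (not part of ZFS): the missed-wakeup-free pattern
* described above, modeled in userland with POSIX threads. The names
* example_waiter(), example_completer(), example_state_lock, and
* example_activity_in_progress are hypothetical stand-ins for an
* activity-specific lock and the state it protects.
*/
#include <pthread.h>
#include <stdbool.h>
static pthread_mutex_t example_activities_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t example_activities_cv = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t example_state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool example_activity_in_progress = true;
static void
example_waiter(void)
{
pthread_mutex_lock(&example_activities_lock);
for (;;) {
/* Check the activity state with both locks held. */
pthread_mutex_lock(&example_state_lock);
bool busy = example_activity_in_progress;
pthread_mutex_unlock(&example_state_lock);
if (!busy)
break;
/*
* example_activities_lock is still held here, so the completer
* cannot broadcast between the check above and this wait;
* a wakeup can never be missed.
*/
pthread_cond_wait(&example_activities_cv, &example_activities_lock);
}
pthread_mutex_unlock(&example_activities_lock);
}
static void
example_completer(void)
{
/* Update the state under the activity-specific lock... */
pthread_mutex_lock(&example_state_lock);
example_activity_in_progress = false;
pthread_mutex_unlock(&example_state_lock);
/* ...then notify, as spa_notify_waiters() does. */
pthread_mutex_lock(&example_activities_lock);
pthread_cond_broadcast(&example_activities_cv);
pthread_mutex_unlock(&example_activities_lock);
}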
static int
spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
switch (activity) {
case ZPOOL_WAIT_CKPT_DISCARD:
*in_progress =
(spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
ENOENT);
break;
case ZPOOL_WAIT_FREE:
*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
!bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
spa_livelist_delete_check(spa));
break;
case ZPOOL_WAIT_INITIALIZE:
case ZPOOL_WAIT_TRIM:
error = spa_vdev_activity_in_progress(spa, use_tag, tag,
activity, in_progress);
break;
case ZPOOL_WAIT_REPLACE:
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
break;
case ZPOOL_WAIT_REMOVE:
*in_progress = (spa->spa_removing_phys.sr_state ==
DSS_SCANNING);
break;
case ZPOOL_WAIT_RESILVER:
if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
break;
/* fall through */
case ZPOOL_WAIT_SCRUB:
{
boolean_t scanning, paused, is_scrub;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
paused = dsl_scan_is_paused_scrub(scn);
*in_progress = (scanning && !paused &&
is_scrub == (activity == ZPOOL_WAIT_SCRUB));
break;
}
default:
panic("unrecognized value for activity %d", activity);
}
return (error);
}
static int
spa_wait_common(const char *pool, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *waited)
{
/*
* The tag is used to distinguish between instances of an activity.
* 'initialize' and 'trim' are the only activities that we use this for.
* The other activities can only have a single instance in progress in a
* pool at one time, making the tag unnecessary.
*
* There can be multiple devices being replaced at once, but since they
* all finish once resilvering finishes, we don't bother keeping track
* of them individually; we just wait for them all to finish.
*/
if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
activity != ZPOOL_WAIT_TRIM)
return (EINVAL);
if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
return (EINVAL);
spa_t *spa;
int error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
/*
* Increment the spa's waiter count so that we can call spa_close and
* still ensure that the spa_t doesn't get freed before this thread is
* finished with it when the pool is exported. We want to call spa_close
* before we start waiting because otherwise the additional ref would
* prevent the pool from being exported or destroyed throughout the
* potentially long wait.
*/
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters++;
spa_close(spa, FTAG);
*waited = B_FALSE;
for (;;) {
boolean_t in_progress;
error = spa_activity_in_progress(spa, activity, use_tag, tag,
&in_progress);
if (error || !in_progress || spa->spa_waiters_cancel)
break;
*waited = B_TRUE;
if (cv_wait_sig(&spa->spa_activities_cv,
&spa->spa_activities_lock) == 0) {
error = EINTR;
break;
}
}
spa->spa_waiters--;
cv_signal(&spa->spa_waiters_cv);
mutex_exit(&spa->spa_activities_lock);
return (error);
}
/*
* Wait for a particular instance of the specified activity to complete, where
* the instance is identified by 'tag'
*/
int
spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
}
/*
* Wait for all instances of the specified activity to complete
*/
int
spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
}
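/*
* Illustrative usage sketch (not part of ZFS): a caller waiting for any
* in-progress scrub to finish via spa_wait() above. The pool name "tank"
* and the helper name are hypothetical; error handling is elided.
*/
static int
example_wait_for_scrub(void)
{
boolean_t waited;
return (spa_wait("tank", ZPOOL_WAIT_SCRUB, &waited));
}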
sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
sysevent_t *ev = NULL;
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
if (resource) {
ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
ev->resource = resource;
}
#endif
return (ev);
}
void
spa_event_post(sysevent_t *ev)
{
#ifdef _KERNEL
if (ev) {
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
kmem_free(ev, sizeof (*ev));
}
#endif
}
/*
* Post a zevent corresponding to the given sysevent. The 'name' must be one
* of the event definitions in sys/sysevent/eventdefs.h. The payload will be
* filled in from the spa and (optionally) the vdev. This doesn't do anything
* in the userland libzpool, as we don't want consumers to misinterpret ztest
* or zdb as real changes.
*/
void
spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
}
/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);
/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);
/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);
/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);
/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_stop);
/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);
/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);
/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW,
"log2 fraction of arc that can be used by inflight I/Os when "
"verifying pool during import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
"Set to traverse metadata on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
"Set to traverse data on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
"Print vdev tree to zfs_dbgmsg during pool import");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
"Percentage of CPUs to run an IO worker thread");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RD,
"Number of threads per IO worker taskqueue");
ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, ULONG, ZMOD_RW,
"Allow importing pool with up to this number of missing top-level "
"vdevs (in read-only mode)");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT, ZMOD_RW,
"Set the livelist condense zthr to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT, ZMOD_RW,
"Set the livelist condense synctask to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel, INT, ZMOD_RW,
"Whether livelist condensing was canceled in the synctask");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel, INT, ZMOD_RW,
"Whether livelist condensing was canceled in the zthr function");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT, ZMOD_RW,
"Whether extra ALLOC blkptrs were added to a livelist entry while it "
"was being condensed");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c b/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c
index f4c2910ad7fe..6fd302b8df34 100644
--- a/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c
+++ b/sys/contrib/openzfs/module/zfs/spa_log_spacemap.c
@@ -1,1322 +1,1322 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018, 2019 by Delphix. All rights reserved.
*/
#include <sys/dmu_objset.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev_impl.h>
#include <sys/zap.h>
/*
* Log Space Maps
*
* Log space maps are an optimization in ZFS metadata allocations for pools
* whose workloads are primarily random-writes. Random-write workloads are also
* typically random-free, meaning that they are freeing from locations scattered
* throughout the pool. This means that each TXG we will have to append some
* FREE records to almost every metaslab. With log space maps, we hold their
* changes in memory and log them altogether in one pool-wide space map on-disk
* for persistence. As more blocks are accumulated in the log space maps and
* more unflushed changes are accounted in memory, we flush a selected group
* of metaslabs every TXG to relieve memory pressure and potential overheads
* when loading the pool. Flushing a metaslab to disk relieves memory as we
* flush any unflushed changes from memory to disk (i.e. the metaslab's space
* map) and saves import time by making old log space maps obsolete and
* eventually destroying them. [A log space map is said to be obsolete when all
* its entries have made it to their corresponding metaslab space maps].
*
* == On disk data structures used ==
*
* - The pool has a new feature flag and a new entry in the MOS. The feature
* is activated when we create the first log space map and remains active
* for the lifetime of the pool. The new entry in the MOS Directory [refer
* to DMU_POOL_LOG_SPACEMAP_ZAP] is populated with a ZAP whose key-value
* pairs are of the form <key: txg, value: log space map object for that txg>.
* This entry is our on-disk reference of the log space maps that exist in
* the pool for each TXG and it is used during import to load all the
* metaslab unflushed changes in memory. To see how this structure is first
* created and later populated refer to spa_generate_syncing_log_sm(). To see
* how it is used during import time refer to spa_ld_log_sm_metadata().
*
* - Each vdev has a new entry in its vdev_top_zap (see field
* VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS) which holds the msp_unflushed_txg of
* each metaslab in this vdev. This field is the on-disk counterpart of the
* in-memory field ms_unflushed_txg, which tells us from which TXG onwards
* the metaslab hasn't had its changes flushed. During import, we use this
* to ignore any entries in the space map log that are for this metaslab but
* from a TXG before msp_unflushed_txg. At that point, we also populate its
* in-memory counterpart and from there both fields are updated every time
* we flush that metaslab.
*
* - A space map is created every TXG and, during that TXG, it is used to log
* all incoming changes (the log space map). When created, the log space map
* is referenced in memory by spa_syncing_log_sm and its object ID is inserted
* into the space map ZAP mentioned above. The log space map is closed at the
* end of the TXG and will be destroyed when it becomes fully obsolete. We
* know when a log space map has become obsolete by looking at the oldest
* (and smallest) ms_unflushed_txg in the pool. If the value of that is bigger
* than the log space map's TXG, then no metaslab still needs the changes
* from that log and we can therefore destroy it
* [see spa_cleanup_old_sm_logs() and the sketch after this comment].
*
* == Important in-memory structures ==
*
* - The per-spa field spa_metaslabs_by_flushed sorts all the metaslabs in
* the pool by their ms_unflushed_txg field. It is primarily used for three
* reasons. First of all, it is used during flushing where we try to flush
* metaslabs in-order from the oldest-flushed to the most recently flushed
* every TXG. Secondly, it helps us to lookup the ms_unflushed_txg of the
* oldest flushed metaslab to distinguish which log space maps have become
* obsolete and which ones are still relevant. Finally it tells us which
* metaslabs have unflushed changes in a pool where this feature was just
* enabled, as we don't immediately add all of the pool's metaslabs but we
* add them over time as they go through metaslab_sync(). The reason that
* we do that is to ease these pools into the behavior of the flushing
* algorithm (described later on).
*
* - The per-spa field spa_sm_logs_by_txg can be thought as the in-memory
* counterpart of the space map ZAP mentioned above. It's an AVL tree whose
* nodes represent the log space maps in the pool. This in-memory
* representation of log space maps in the pool sorts the log space maps by
* the TXG that they were created (which is also the TXG of their unflushed
* changes). It also contains the following extra information for each
* space map:
* [1] The number of metaslabs that were last flushed on that TXG. This is
* important because if that counter is zero and this is the oldest
* log then it means that it is also obsolete.
* [2] The number of blocks of that space map. This field is used by the
* block heuristic of our flushing algorithm (described later on).
* It represents how many blocks of metadata changes ZFS had to write
* to disk for that TXG.
*
* - The per-spa field spa_log_summary is a list of entries that summarizes
* the metaslab and block counts of all the nodes of the spa_sm_logs_by_txg
* AVL tree mentioned above. The reason this exists is that our flushing
* algorithm (described later) tries to estimate how many metaslabs to flush
* in each TXG by iterating over all the log space maps and looking at their
* block counts. Summarizing that information means that we don't have to
* iterate through each space map, minimizing the runtime overhead of the
* flushing algorithm which would be induced in syncing context. In terms of
* implementation the log summary is used as a queue:
* * we modify or pop entries from its head when we flush metaslabs
* * we modify or append entries to its tail when we sync changes.
*
* - Each metaslab has two new range trees that hold its unflushed changes,
* ms_unflushed_allocs and ms_unflushed_frees. These are always disjoint.
*
* == Flushing algorithm ==
*
* The decision of how many metaslabs to flush on a given TXG is guided by
* two heuristics:
*
* [1] The memory heuristic -
* We keep track of the memory used by the unflushed trees from all the
* metaslabs [see sus_memused of spa_unflushed_stats] and we ensure that it
* stays below a certain threshold which is determined by an arbitrary hard
* limit and an arbitrary percentage of the system's memory [see
* spa_log_exceeds_memlimit()]. When we see that the memory usage of the
* unflushed changes are passing that threshold, we flush metaslabs, which
* empties their unflushed range trees, reducing the memory used.
*
* [2] The block heuristic -
* We try to keep the total number of blocks in the log space maps in check
* so the log doesn't grow indefinitely and we don't induce a lot of overhead
* when loading the pool. At the same time we don't want to flush a lot of
* metaslabs too often as this would defeat the purpose of the log space map.
* As a result we set a limit on the number of blocks that we think is
* acceptable for the log space maps to have and try not to cross it.
* [see sus_blocklimit from spa_unflushed_stats].
*
* In order to stay below the block limit every TXG we have to estimate how
* many metaslabs we need to flush based on the current rate of incoming blocks
* and our history of log space map blocks. The main idea here is to answer
* the question of how many metaslabs we need to flush in order to get rid of
* at least X log space map blocks. We can answer this question
* by iterating backwards from the oldest log space map to the newest one
* and looking at their metaslab and block counts. At this point the log summary
* mentioned above comes in handy as it reduces the amount of data that we have
* to iterate over (even though it may reduce the precision of our estimates due
* to its aggregation). So with that in mind, we project the incoming
* rate of the current TXG into the future and attempt to approximate how many
* metaslabs we would need to flush from now on in order to avoid exceeding our
* block limit at different points in the future (granted that we would keep
* flushing the same number of metaslabs every TXG). Then we take the
* maximum number from all these estimates to be on the safe side. For the
* exact implementation details of the algorithm refer to
* spa_estimate_metaslabs_to_flush.
*/
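/*
* Illustrative sketch (not part of ZFS), referenced from the overview above:
* a log space map created in TXG log_txg is obsolete once every metaslab's
* ms_unflushed_txg is newer than it, which is the condition that
* spa_cleanup_old_sm_logs() relies on. The names here are hypothetical.
*/
#include <stdbool.h>
#include <stdint.h>
static bool
example_log_is_obsolete(uint64_t log_txg,
const uint64_t *ms_unflushed_txgs, int nmetaslabs)
{
for (int i = 0; i < nmetaslabs; i++) {
/* Some metaslab still needs entries from this log. */
if (ms_unflushed_txgs[i] <= log_txg)
return (false);
}
return (true);
}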
/*
* This is used as the block size for the space maps used for the
* log space map feature. These space maps benefit from a bigger
* block size as we expect to be writing a lot of data to them at
* once.
*/
unsigned long zfs_log_sm_blksz = 1ULL << 17;
/*
* Percentage of the overall system's memory that ZFS allows to be
* used for unflushed changes (e.g. the sum of size of all the nodes
* in the unflushed trees).
*
* Note that this value is calculated over 1000000 for finer granularity
* (thus the _ppm suffix; reads as "parts per million"). As an example,
* the default of 1000 allows 0.1% of memory to be used.
*/
unsigned long zfs_unflushed_max_mem_ppm = 1000;
/*
* Specific hard-limit in memory that ZFS allows to be used for
* unflushed changes.
*/
unsigned long zfs_unflushed_max_mem_amt = 1ULL << 30;
/*
* The following tunable determines the number of blocks that can be used for
* the log space maps. It is expressed as a percentage of the total number of
* metaslabs in the pool (i.e. the default of 400 means that the number of log
* blocks is capped at 4 times the number of metaslabs).
*
* This value exists to tune our flushing algorithm, with higher values
* flushing metaslabs less often (doing fewer I/Os) per TXG versus lower values
* flushing metaslabs more aggressively with the upside of saving overheads
* when loading the pool. Another factor in this tradeoff is that flushing
* less often can potentially lead to better utilization of the metaslab space
* map's block size as we accumulate more changes per flush.
*
* Given that this tunable indirectly controls the flush rate (metaslabs
* flushed per txg), making it a percentage of the number of metaslabs in the
* pool makes sense here. (A small sketch of how the resulting limit is
* clamped appears after the min/max tunables below.)
*
* As a rule of thumb we default this tunable to 400% based on the following:
*
* 1] Assuming a constant flush rate and a constant incoming rate of log blocks
* it is reasonable to expect that the amount of obsolete entries changes
* linearly from txg to txg (e.g. the oldest log should have the most
* obsolete entries, and the most recent one the least). With this we could
* say that, at any given time, about half of the entries in the whole space
* map log are obsolete. Thus for every two entries for a metaslab in the
* log space map, only one of them is valid and actually makes it to the
* metaslab's space map.
* [factor of 2]
* 2] Each entry in the log space map is guaranteed to be two words while
* entries in metaslab space maps are generally single-word.
* [an extra factor of 2 - 400% overall]
* 3] Even if [1] and [2] are slightly less than 2 each, we haven't taken into
* account any consolidation of segments from the log space map to the
* unflushed range trees nor their history (e.g. a segment being allocated,
* then freed, then allocated again means 3 log space map entries but 0
* metaslab space map entries). Depending on the workload, we've seen ~1.8
* non-obsolete log space map entries per metaslab entry, for a total of
* ~600%. Since most of these estimates though are workload dependent, we
* default on 400% to be conservative.
*
* Thus we could say that even in the worst
* case of [1] and [2], the factor should end up being 4.
*
* That said, regardless of the number of metaslabs in the pool we need to
* provide upper and lower bounds for the log block limit.
* [see zfs_unflushed_log_block_{min,max}]
*/
unsigned long zfs_unflushed_log_block_pct = 400;
/*
* If the number of metaslabs is small and our incoming rate is high, we could
* get into a situation where we are flushing all our metaslabs every TXG. Thus
* we always allow at least this many log blocks.
*/
unsigned long zfs_unflushed_log_block_min = 1000;
/*
* If the log becomes too big, the import time of the pool can take a hit in
* terms of performance. Thus we have a hard limit on the size of the log in
* terms of blocks.
*/
unsigned long zfs_unflushed_log_block_max = (1ULL << 18);
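/*
* Illustrative sketch (not part of ZFS): how the three tunables above combine
* into the block limit, mirroring spa_log_sm_set_blocklimit() below. The
* helper name is hypothetical.
*/
#include <stdint.h>
static uint64_t
example_log_block_limit(uint64_t total_metaslabs, uint64_t pct,
uint64_t blk_min, uint64_t blk_max)
{
uint64_t calculated = (total_metaslabs * pct) / 100;
/*
* e.g. 100 metaslabs at the default 400% gives 400 blocks, which is
* then raised to the lower bound of 1000.
*/
if (calculated < blk_min)
return (blk_min);
if (calculated > blk_max)
return (blk_max);
return (calculated);
}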
/*
* Max # of rows allowed for the log_summary. The tradeoff here is accuracy and
* stability of the flushing algorithm (longer summary) vs its runtime overhead
* (smaller summary is faster to traverse).
*/
unsigned long zfs_max_logsm_summary_length = 10;
/*
* Tunable that sets the lower bound on the metaslabs to flush every TXG.
*
* Setting this to 0 has no effect since if the pool is idle we won't even be
* creating log space maps and therefore we won't be flushing. On the other
* hand if the pool has any incoming workload our block heuristic will start
* flushing metaslabs anyway.
*
* The point of this tunable is to be used in extreme cases where we really
* want to flush more metaslabs than our adaptable heuristic plans to flush.
*/
unsigned long zfs_min_metaslabs_to_flush = 1;
/*
* Tunable that specifies how far in the past we want to look when trying to
* estimate the incoming log blocks for the current TXG.
*
* Setting this too high may not only increase runtime but also minimize the
* effect of the incoming rates from the most recent TXGs as we take the
* average over all the blocks that we walk
* [see spa_estimate_incoming_log_blocks].
*/
unsigned long zfs_max_log_walking = 5;
/*
* This tunable exists solely for testing purposes. It ensures that the log
* spacemaps are not flushed and destroyed during export in order for the
* relevant log spacemap import code paths to be tested (effectively simulating
* a crash).
*/
int zfs_keep_log_spacemaps_at_export = 0;
static uint64_t
spa_estimate_incoming_log_blocks(spa_t *spa)
{
ASSERT3U(spa_sync_pass(spa), ==, 1);
uint64_t steps = 0, sum = 0;
for (spa_log_sm_t *sls = avl_last(&spa->spa_sm_logs_by_txg);
sls != NULL && steps < zfs_max_log_walking;
sls = AVL_PREV(&spa->spa_sm_logs_by_txg, sls)) {
if (sls->sls_txg == spa_syncing_txg(spa)) {
/*
* skip the log created in this TXG as this would
* make our estimations inaccurate.
*/
continue;
}
sum += sls->sls_nblocks;
steps++;
}
return ((steps > 0) ? DIV_ROUND_UP(sum, steps) : 0);
}
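/*
* Illustrative sketch (not part of ZFS): the rounded-up average that
* spa_estimate_incoming_log_blocks() above computes over the most recent
* log space maps, here over a plain array. Names are hypothetical.
*/
#include <stdint.h>
static uint64_t
example_incoming_estimate(const uint64_t *recent_nblocks, uint64_t steps)
{
uint64_t sum = 0;
for (uint64_t i = 0; i < steps; i++)
sum += recent_nblocks[i];
/* e.g. {12, 9, 16} over 3 TXGs -> ceil(37 / 3) = 13 blocks expected */
return (steps > 0 ? (sum + steps - 1) / steps : 0);
}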
uint64_t
spa_log_sm_blocklimit(spa_t *spa)
{
return (spa->spa_unflushed_stats.sus_blocklimit);
}
void
spa_log_sm_set_blocklimit(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
ASSERT0(spa_log_sm_blocklimit(spa));
return;
}
uint64_t calculated_limit =
(spa_total_metaslabs(spa) * zfs_unflushed_log_block_pct) / 100;
spa->spa_unflushed_stats.sus_blocklimit = MIN(MAX(calculated_limit,
zfs_unflushed_log_block_min), zfs_unflushed_log_block_max);
}
uint64_t
spa_log_sm_nblocks(spa_t *spa)
{
return (spa->spa_unflushed_stats.sus_nblocks);
}
/*
* Ensure that the in-memory log space map structures and the summary
* have the same block and metaslab counts.
*/
static void
spa_log_summary_verify_counts(spa_t *spa)
{
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
if ((zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) == 0)
return;
uint64_t ms_in_avl = avl_numnodes(&spa->spa_metaslabs_by_flushed);
uint64_t ms_in_summary = 0, blk_in_summary = 0;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e; e = list_next(&spa->spa_log_summary, e)) {
ms_in_summary += e->lse_mscount;
blk_in_summary += e->lse_blkcount;
}
uint64_t ms_in_logs = 0, blk_in_logs = 0;
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
ms_in_logs += sls->sls_mscount;
blk_in_logs += sls->sls_nblocks;
}
VERIFY3U(ms_in_logs, ==, ms_in_summary);
VERIFY3U(ms_in_logs, ==, ms_in_avl);
VERIFY3U(blk_in_logs, ==, blk_in_summary);
VERIFY3U(blk_in_logs, ==, spa_log_sm_nblocks(spa));
}
static boolean_t
summary_entry_is_full(spa_t *spa, log_summary_entry_t *e)
{
uint64_t blocks_per_row = MAX(1,
DIV_ROUND_UP(spa_log_sm_blocklimit(spa),
zfs_max_logsm_summary_length));
return (blocks_per_row <= e->lse_blkcount);
}
/*
* Update the log summary information to reflect the fact that a metaslab
* was flushed or destroyed (e.g. due to device removal or pool export/destroy).
*
* We typically flush the oldest flushed metaslab so the first (and oldest)
* entry of the summary is updated. However if that metaslab is getting loaded
* we may flush the second oldest one which may be part of an entry later in
* the summary. Moreover, if we call into this function from metaslab_fini()
* the metaslabs probably won't be ordered by ms_unflushed_txg. Thus we ask
* for a txg as an argument so we can locate the appropriate summary entry for
* the metaslab.
*/
void
spa_log_summary_decrement_mscount(spa_t *spa, uint64_t txg)
{
/*
* We don't track summary data for read-only pools and this function
* can be called from metaslab_fini(). In that case return immediately.
*/
if (!spa_writeable(spa))
return;
log_summary_entry_t *target = NULL;
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e != NULL; e = list_next(&spa->spa_log_summary, e)) {
if (e->lse_start > txg)
break;
target = e;
}
if (target == NULL || target->lse_mscount == 0) {
/*
* We didn't find a summary entry for this metaslab. We must be
* at the teardown of a spa_load() attempt that got an error
* while reading the log space maps.
*/
VERIFY3S(spa_load_state(spa), ==, SPA_LOAD_ERROR);
return;
}
target->lse_mscount--;
}
/*
* Update the log summary information to reflect the fact that we destroyed
* old log space maps. Since we can only destroy the oldest log space maps,
* we decrement the block count of the oldest summary entry and potentially
* destroy it when that count hits 0.
*
* This function is called after a metaslab is flushed and typically that
* metaslab is the oldest flushed, which means that this function will
* typically decrement the block count of the first entry of the summary and
* potentially free it if the block count gets to zero (its metaslab count
* should be zero too at that point).
*
* There are certain scenarios though that don't work exactly like that so we
* need to account for them:
*
* Scenario [1]: It is possible that after we flushed the oldest flushed
* metaslab and we destroyed the oldest log space map, more recent logs had 0
* metaslabs pointing to them so we got rid of them too. This can happen due
* to metaslabs being destroyed through device removal, or because the oldest
* flushed metaslab was loading but we kept flushing more recently flushed
* metaslabs due to the memory pressure of unflushed changes. Because of that,
* we always iterate from the beginning of the summary: if blocks_gone is
* bigger than the block count of the current entry, we free that entry (we
* expect its metaslab count to be zero), decrement blocks_gone by that block
* count, and move on to the next entry, repeating this procedure until
* blocks_gone gets down to 0. Doing this also works for the typical case
* mentioned above.
*
* Scenario [2]: The oldest flushed metaslab isn't necessarily accounted by
* the first (and oldest) entry in the summary. If the first few entries of
* the summary were only accounting metaslabs from a device that was just
* removed, then the current oldest flushed metaslab could be accounted by an
* entry somewhere in the middle of the summary. Moreover flushing that
* metaslab will destroy all the log space maps older than its ms_unflushed_txg
* because they became obsolete after the removal. Thus, iterating as we did
* for scenario [1] works out for this case too.
*
* Scenario [3]: At times we decide to flush all the metaslabs in the pool
* in one TXG (either because we are exporting the pool or because our flushing
* heuristics decided to do so). When that happens all the log space maps get
* destroyed except the one created for the current TXG which doesn't have
* any log blocks yet. As log space maps get destroyed with every metaslab that
* we flush, entries in the summary are also destroyed. This brings a weird
* corner-case when we flush the last metaslab and the log space map of the
* current TXG is in the same summary entry with other log space maps that
* are older. When that happens we are eventually left with this one last
* summary entry whose blocks are gone (blocks_gone equals the entry's block
* count) but its metaslab count is non-zero (because it accounts all the
* metaslabs in the pool as they all got flushed). Under this scenario we can't
* free this last summary entry as it's referencing all the metaslabs in the
* pool and its block count will get incremented at the end of this sync (when
* we close the syncing log space map). Thus we just decrement its current
* block count and leave it alone. In the case that the pool gets exported,
* its metaslab count will be decremented over time as we call metaslab_fini()
* for all the metaslabs in the pool and the entry will be freed at
* spa_unload_log_sm_metadata().
*/
void
spa_log_summary_decrement_blkcount(spa_t *spa, uint64_t blocks_gone)
{
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e != NULL; e = list_head(&spa->spa_log_summary)) {
if (e->lse_blkcount > blocks_gone) {
/*
* Assert that we stopped at an entry that is not
* obsolete.
*/
ASSERT(e->lse_mscount != 0);
e->lse_blkcount -= blocks_gone;
blocks_gone = 0;
break;
} else if (e->lse_mscount == 0) {
/* remove obsolete entry */
blocks_gone -= e->lse_blkcount;
list_remove(&spa->spa_log_summary, e);
kmem_free(e, sizeof (log_summary_entry_t));
} else {
/* Verify that this is scenario [3] mentioned above. */
VERIFY3U(blocks_gone, ==, e->lse_blkcount);
/*
* Assert that this is scenario [3] further by ensuring
* that this is the only entry in the summary.
*/
VERIFY3P(e, ==, list_tail(&spa->spa_log_summary));
ASSERT3P(e, ==, list_head(&spa->spa_log_summary));
blocks_gone = e->lse_blkcount = 0;
break;
}
}
/*
* Ensure that there is no way we are trying to remove more blocks
* than the # of blocks in the summary.
*/
ASSERT0(blocks_gone);
}
void
spa_log_sm_decrement_mscount(spa_t *spa, uint64_t txg)
{
spa_log_sm_t target = { .sls_txg = txg };
spa_log_sm_t *sls = avl_find(&spa->spa_sm_logs_by_txg,
&target, NULL);
if (sls == NULL) {
/*
* We must be at the teardown of a spa_load() attempt that
* got an error while reading the log space maps.
*/
VERIFY3S(spa_load_state(spa), ==, SPA_LOAD_ERROR);
return;
}
ASSERT(sls->sls_mscount > 0);
sls->sls_mscount--;
}
void
spa_log_sm_increment_current_mscount(spa_t *spa)
{
spa_log_sm_t *last_sls = avl_last(&spa->spa_sm_logs_by_txg);
ASSERT3U(last_sls->sls_txg, ==, spa_syncing_txg(spa));
last_sls->sls_mscount++;
}
static void
summary_add_data(spa_t *spa, uint64_t txg, uint64_t metaslabs_flushed,
uint64_t nblocks)
{
log_summary_entry_t *e = list_tail(&spa->spa_log_summary);
if (e == NULL || summary_entry_is_full(spa, e)) {
e = kmem_zalloc(sizeof (log_summary_entry_t), KM_SLEEP);
e->lse_start = txg;
list_insert_tail(&spa->spa_log_summary, e);
}
ASSERT3U(e->lse_start, <=, txg);
e->lse_mscount += metaslabs_flushed;
e->lse_blkcount += nblocks;
}
static void
spa_log_summary_add_incoming_blocks(spa_t *spa, uint64_t nblocks)
{
summary_add_data(spa, spa_syncing_txg(spa), 0, nblocks);
}
void
spa_log_summary_add_flushed_metaslab(spa_t *spa)
{
summary_add_data(spa, spa_syncing_txg(spa), 1, 0);
}
/*
* This function attempts to estimate how many metaslabs we should
* flush to satisfy our block heuristic for the log spacemap
* for the upcoming TXGs.
*
* Specifically, it first tries to estimate the number of incoming
* blocks in this TXG. Then by projecting that incoming rate to
* future TXGs and using the log summary, it figures out how many
* flushes we would need to do for each future TXG individually to
* stay below our block limit and returns the maximum number of
* flushes from those estimates. (A standalone userland sketch of this
* projection follows the function.)
*/
static uint64_t
spa_estimate_metaslabs_to_flush(spa_t *spa)
{
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(spa_log_sm_blocklimit(spa) != 0);
/*
* This variable contains the incoming rate that will be projected
* and used for our flushing estimates in the future.
*/
uint64_t incoming = spa_estimate_incoming_log_blocks(spa);
/*
* At any point in time this variable tells us how many
* TXGs in the future we are so we can make our estimations.
*/
uint64_t txgs_in_future = 1;
/*
* This variable tells us how much room we have until we hit
* our limit. When it goes negative, it means that we've exceeded
* our limit and we need to flush.
*
* Note that since we start at the first TXG in the future (i.e.
* txgs_in_future starts from 1) we already decrement this
* variable by the incoming rate.
*/
int64_t available_blocks =
spa_log_sm_blocklimit(spa) - spa_log_sm_nblocks(spa) - incoming;
/*
* This variable tells us the total number of flushes needed to
* keep the log size within the limit when we reach txgs_in_future.
*/
uint64_t total_flushes = 0;
/* Holds the current maximum of our estimates so far. */
uint64_t max_flushes_pertxg =
MIN(avl_numnodes(&spa->spa_metaslabs_by_flushed),
zfs_min_metaslabs_to_flush);
/*
* For our estimations we only look as far in the future
* as the summary allows us.
*/
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e; e = list_next(&spa->spa_log_summary, e)) {
/*
* If there is still room before we exceed our limit
* then keep skipping TXGs accumulating more blocks
* based on the incoming rate until we exceed it.
*/
if (available_blocks >= 0) {
uint64_t skip_txgs = (available_blocks / incoming) + 1;
available_blocks -= (skip_txgs * incoming);
txgs_in_future += skip_txgs;
ASSERT3S(available_blocks, >=, -incoming);
}
/*
* At this point we're far enough into the future where
* the limit was just exceeded and we flush metaslabs
* based on the current entry in the summary, updating
* our available_blocks.
*/
ASSERT3S(available_blocks, <, 0);
available_blocks += e->lse_blkcount;
total_flushes += e->lse_mscount;
/*
* Keep the running maximum of the total_flushes that
* we've done so far over the number of TXGs in the
* future that we are. The idea here is to estimate
* the average number of flushes that we should do
* every TXG so that when we are that many TXGs in the
* future we stay under the limit.
*/
max_flushes_pertxg = MAX(max_flushes_pertxg,
DIV_ROUND_UP(total_flushes, txgs_in_future));
ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=,
max_flushes_pertxg);
}
return (max_flushes_pertxg);
}
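/*
* Standalone userland sketch (not part of ZFS) of the projection performed
* by spa_estimate_metaslabs_to_flush() above, with a plain array standing
* in for the log summary. All names are hypothetical, and the incoming
* rate is assumed to be non-zero (it is forced to 1 otherwise).
*/
#include <stdint.h>
struct example_summary_row {
uint64_t blkcount; /* log blocks accounted by this row */
uint64_t mscount; /* metaslabs last flushed in this row */
};
static uint64_t
example_estimate_flushes(const struct example_summary_row *rows, int nrows,
uint64_t blocklimit, uint64_t nblocks, uint64_t incoming)
{
if (incoming == 0)
incoming = 1; /* simplification for the sketch */
int64_t available = (int64_t)blocklimit - (int64_t)nblocks -
(int64_t)incoming;
uint64_t txgs_in_future = 1;
uint64_t total_flushes = 0;
uint64_t max_per_txg = 0;
for (int i = 0; i < nrows; i++) {
if (available >= 0) {
/* Skip ahead until the projected limit is exceeded. */
uint64_t skip = (uint64_t)available / incoming + 1;
available -= (int64_t)(skip * incoming);
txgs_in_future += skip;
}
/* Flushing this row's metaslabs eventually frees its blocks. */
available += (int64_t)rows[i].blkcount;
total_flushes += rows[i].mscount;
/* Keep the running maximum of the per-TXG flush estimate. */
uint64_t per_txg = (total_flushes + txgs_in_future - 1) /
txgs_in_future;
if (per_txg > max_per_txg)
max_per_txg = per_txg;
}
return (max_per_txg);
}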
uint64_t
spa_log_sm_memused(spa_t *spa)
{
return (spa->spa_unflushed_stats.sus_memused);
}
static boolean_t
spa_log_exceeds_memlimit(spa_t *spa)
{
if (spa_log_sm_memused(spa) > zfs_unflushed_max_mem_amt)
return (B_TRUE);
uint64_t system_mem_allowed = ((physmem * PAGESIZE) *
zfs_unflushed_max_mem_ppm) / 1000000;
if (spa_log_sm_memused(spa) > system_mem_allowed)
return (B_TRUE);
return (B_FALSE);
}
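/*
* Illustrative sketch (not part of ZFS): how the parts-per-million tunable
* used by spa_log_exceeds_memlimit() above translates into a byte limit.
* The helper name is hypothetical.
*/
#include <stdint.h>
static uint64_t
example_ppm_mem_limit(uint64_t physmem_bytes, uint64_t ppm)
{
/* e.g. 16 GiB of RAM with the default 1000 ppm allows about 16 MiB. */
return ((physmem_bytes * ppm) / 1000000);
}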
boolean_t
spa_flush_all_logs_requested(spa_t *spa)
{
return (spa->spa_log_flushall_txg != 0);
}
void
spa_flush_metaslabs(spa_t *spa, dmu_tx_t *tx)
{
uint64_t txg = dmu_tx_get_txg(tx);
if (spa_sync_pass(spa) != 1)
return;
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
/*
* If we don't have any metaslabs with unflushed changes
* return immediately.
*/
if (avl_numnodes(&spa->spa_metaslabs_by_flushed) == 0)
return;
/*
* During SPA export we leave a few empty TXGs to go by [see
* spa_final_dirty_txg() to understand why]. For this specific
* case, it is important to not flush any metaslabs as that
* would dirty this TXG.
*
* That said, during one of these dirty TXGs that is less than or
* equal to spa_final_dirty_txg(), spa_unload() will request that
* we try to flush all the metaslabs for that TXG before
* exporting the pool. Thus we make sure that no such request to
* flush everything is pending before we attempt to return
* immediately.
*/
if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
!dmu_objset_is_dirty(spa_meta_objset(spa), txg) &&
!spa_flush_all_logs_requested(spa))
return;
/*
* We need to generate a log space map before flushing because this
* will set up the in-memory data (i.e. node in spa_sm_logs_by_txg)
* for this TXG's flushed metaslab count (aka sls_mscount which is
* manipulated in many ways down the metaslab_flush() codepath).
*
* That is not to say that we might generate a log space map when we
* don't need one: if we are flushing metaslabs, we were going to write
* changes to disk anyway, so even if we were not flushing, a log space
* map would have been created anyway in metaslab_sync().
*/
spa_generate_syncing_log_sm(spa, tx);
/*
* This variable tells us how many metaslabs we want to flush based
* on the block-heuristic of our flushing algorithm (see block comment
* of log space map feature). We also decrement this as we flush
* metaslabs and attempt to destroy old log space maps.
*/
uint64_t want_to_flush;
if (spa_flush_all_logs_requested(spa)) {
ASSERT3S(spa_state(spa), ==, POOL_STATE_EXPORTED);
want_to_flush = avl_numnodes(&spa->spa_metaslabs_by_flushed);
} else {
want_to_flush = spa_estimate_metaslabs_to_flush(spa);
}
ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=,
want_to_flush);
/* Used purely for verification purposes */
uint64_t visited = 0;
/*
* Ideally we would iterate through spa_metaslabs_by_flushed
* using only one variable (curr). We can't do that because
* metaslab_flush() mutates the position of curr in the AVL when
* it flushes that metaslab by moving it to the end of the tree.
* Thus we always keep track of the original next node of the
* current node (curr) in another variable (next).
*/
metaslab_t *next = NULL;
for (metaslab_t *curr = avl_first(&spa->spa_metaslabs_by_flushed);
curr != NULL; curr = next) {
next = AVL_NEXT(&spa->spa_metaslabs_by_flushed, curr);
/*
* If this metaslab has been flushed this txg then we've done
* a full circle over the metaslabs.
*/
if (metaslab_unflushed_txg(curr) == txg)
break;
/*
* If we are done flushing for the block heuristic and the
* unflushed changes don't exceed the memory limit just stop.
*/
if (want_to_flush == 0 && !spa_log_exceeds_memlimit(spa))
break;
mutex_enter(&curr->ms_sync_lock);
mutex_enter(&curr->ms_lock);
boolean_t flushed = metaslab_flush(curr, tx);
mutex_exit(&curr->ms_lock);
mutex_exit(&curr->ms_sync_lock);
/*
* If we failed to flush a metaslab (because it was loading),
* then we are done with the block heuristic as it's not
* possible to destroy any log space maps once you've skipped
* a metaslab. In that case we just set our counter to 0 but
* we continue looping in case there is still memory pressure
* due to unflushed changes. Note that flushing a metaslab
* that is not the oldest flushed in the pool will never
* destroy any log space maps [see spa_cleanup_old_sm_logs()].
*/
if (!flushed) {
want_to_flush = 0;
} else if (want_to_flush > 0) {
want_to_flush--;
}
visited++;
}
ASSERT3U(avl_numnodes(&spa->spa_metaslabs_by_flushed), >=, visited);
}
/*
* Close the log space map for this TXG and update the block counts
* for the log's in-memory structure and the summary.
*/
void
spa_sync_close_syncing_log_sm(spa_t *spa)
{
if (spa_syncing_log_sm(spa) == NULL)
return;
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
spa_log_sm_t *sls = avl_last(&spa->spa_sm_logs_by_txg);
ASSERT3U(sls->sls_txg, ==, spa_syncing_txg(spa));
sls->sls_nblocks = space_map_nblocks(spa_syncing_log_sm(spa));
spa->spa_unflushed_stats.sus_nblocks += sls->sls_nblocks;
/*
* Note that we can't assert that sls_mscount is not 0,
* because there is the case where the first metaslab
* in spa_metaslabs_by_flushed is loading and we were
* not able to flush any metaslabs in the current TXG.
*/
ASSERT(sls->sls_nblocks != 0);
spa_log_summary_add_incoming_blocks(spa, sls->sls_nblocks);
spa_log_summary_verify_counts(spa);
space_map_close(spa->spa_syncing_log_sm);
spa->spa_syncing_log_sm = NULL;
/*
* At this point we have tried to flush as many metaslabs as we
* could, as the pool is being exported. Reset the "flush all"
* request so the last few TXGs before closing the pool can be empty
* (e.g. not dirty).
*/
if (spa_flush_all_logs_requested(spa)) {
ASSERT3S(spa_state(spa), ==, POOL_STATE_EXPORTED);
spa->spa_log_flushall_txg = 0;
}
}
void
spa_cleanup_old_sm_logs(spa_t *spa, dmu_tx_t *tx)
{
objset_t *mos = spa_meta_objset(spa);
uint64_t spacemap_zap;
int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT) {
ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
return;
}
VERIFY0(error);
metaslab_t *oldest = avl_first(&spa->spa_metaslabs_by_flushed);
uint64_t oldest_flushed_txg = metaslab_unflushed_txg(oldest);
/* Free all log space maps older than the oldest_flushed_txg. */
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls && sls->sls_txg < oldest_flushed_txg;
sls = avl_first(&spa->spa_sm_logs_by_txg)) {
ASSERT0(sls->sls_mscount);
avl_remove(&spa->spa_sm_logs_by_txg, sls);
space_map_free_obj(mos, sls->sls_sm_obj, tx);
VERIFY0(zap_remove_int(mos, spacemap_zap, sls->sls_txg, tx));
spa->spa_unflushed_stats.sus_nblocks -= sls->sls_nblocks;
kmem_free(sls, sizeof (spa_log_sm_t));
}
}
static spa_log_sm_t *
spa_log_sm_alloc(uint64_t sm_obj, uint64_t txg)
{
spa_log_sm_t *sls = kmem_zalloc(sizeof (*sls), KM_SLEEP);
sls->sls_sm_obj = sm_obj;
sls->sls_txg = txg;
return (sls);
}
void
spa_generate_syncing_log_sm(spa_t *spa, dmu_tx_t *tx)
{
uint64_t txg = dmu_tx_get_txg(tx);
objset_t *mos = spa_meta_objset(spa);
if (spa_syncing_log_sm(spa) != NULL)
return;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
uint64_t spacemap_zap;
int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT) {
ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
error = 0;
spacemap_zap = zap_create(mos,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1,
&spacemap_zap, tx));
spa_feature_incr(spa, SPA_FEATURE_LOG_SPACEMAP, tx);
}
VERIFY0(error);
uint64_t sm_obj;
ASSERT3U(zap_lookup_int_key(mos, spacemap_zap, txg, &sm_obj),
==, ENOENT);
sm_obj = space_map_alloc(mos, zfs_log_sm_blksz, tx);
VERIFY0(zap_add_int_key(mos, spacemap_zap, txg, sm_obj, tx));
avl_add(&spa->spa_sm_logs_by_txg, spa_log_sm_alloc(sm_obj, txg));
/*
* We pass UINT64_MAX as the space map's representation size
* and SPA_MINBLOCKSHIFT as the shift, to make the space map
* accept any sort of segment since there's no real advantage
* to being more restrictive (given that we're already going
* to be using 2-word entries).
*/
VERIFY0(space_map_open(&spa->spa_syncing_log_sm, mos, sm_obj,
0, UINT64_MAX, SPA_MINBLOCKSHIFT));
/*
* If the log space map feature was just enabled, the blocklimit
* has not yet been set.
*/
if (spa_log_sm_blocklimit(spa) == 0)
spa_log_sm_set_blocklimit(spa);
}
/*
* Find all the log space maps stored in the space map ZAP and sort
* them by their TXG in spa_sm_logs_by_txg.
*/
static int
spa_ld_log_sm_metadata(spa_t *spa)
{
int error;
uint64_t spacemap_zap;
ASSERT(avl_is_empty(&spa->spa_sm_logs_by_txg));
error = zap_lookup(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_LOG_SPACEMAP_ZAP, sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT) {
/* the space map ZAP doesn't exist yet */
return (0);
} else if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_metadata(): failed at "
"zap_lookup(DMU_POOL_DIRECTORY_OBJECT) [error %d]",
error);
return (error);
}
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, spa_meta_objset(spa), spacemap_zap);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t log_txg = zfs_strtonum(za.za_name, NULL);
spa_log_sm_t *sls =
spa_log_sm_alloc(za.za_first_integer, log_txg);
avl_add(&spa->spa_sm_logs_by_txg, sls);
}
zap_cursor_fini(&zc);
if (error != ENOENT) {
spa_load_failed(spa, "spa_ld_log_sm_metadata(): failed at "
"zap_cursor_retrieve(spacemap_zap) [error %d]",
error);
return (error);
}
for (metaslab_t *m = avl_first(&spa->spa_metaslabs_by_flushed);
m; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
spa_log_sm_t target = { .sls_txg = metaslab_unflushed_txg(m) };
spa_log_sm_t *sls = avl_find(&spa->spa_sm_logs_by_txg,
&target, NULL);
/*
* At this point, if sls is NULL it means that a bug occurred
* in ZFS the last time the pool was open or earlier in the
* import code path. In general, we would have placed a
* VERIFY() here or, in this case, just let the kernel panic
* with a NULL pointer dereference when incrementing sls_mscount,
* but since this is the import code path we can be a bit more
* lenient. Thus, for DEBUG bits we always cause a panic, while
* in production we log the error and just fail the import.
*/
ASSERT(sls != NULL);
if (sls == NULL) {
spa_load_failed(spa, "spa_ld_log_sm_metadata(): bug "
"encountered: could not find log spacemap for "
- "TXG %ld [error %d]",
- metaslab_unflushed_txg(m), ENOENT);
+ "TXG %llu [error %d]",
+ (u_longlong_t)metaslab_unflushed_txg(m), ENOENT);
return (ENOENT);
}
sls->sls_mscount++;
}
return (0);
}
typedef struct spa_ld_log_sm_arg {
spa_t *slls_spa;
uint64_t slls_txg;
} spa_ld_log_sm_arg_t;
static int
spa_ld_log_sm_cb(space_map_entry_t *sme, void *arg)
{
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint32_t vdev_id = sme->sme_vdev;
spa_ld_log_sm_arg_t *slls = arg;
spa_t *spa = slls->slls_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/*
* If the vdev has been removed (i.e. it is indirect or a hole)
* skip this entry. The contents of this vdev have already moved
* elsewhere.
*/
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(!ms->ms_loaded);
/*
* If we have already flushed entries for this TXG to this
* metaslab's space map, then ignore it. Note that we flush
* before processing any allocations/frees for that TXG, so
* the metaslab's space map only has entries from *before*
* the unflushed TXG.
*/
if (slls->slls_txg < metaslab_unflushed_txg(ms))
return (0);
switch (sme->sme_type) {
case SM_ALLOC:
range_tree_remove_xor_add_segment(offset, offset + size,
ms->ms_unflushed_frees, ms->ms_unflushed_allocs);
break;
case SM_FREE:
range_tree_remove_xor_add_segment(offset, offset + size,
ms->ms_unflushed_allocs, ms->ms_unflushed_frees);
break;
default:
panic("invalid maptype_t");
break;
}
return (0);
}
static int
spa_ld_log_sm_data(spa_t *spa)
{
int error = 0;
/*
* If we are not going to do any writes there is no need
* to read the log space maps.
*/
if (!spa_writeable(spa))
return (0);
ASSERT0(spa->spa_unflushed_stats.sus_nblocks);
ASSERT0(spa->spa_unflushed_stats.sus_memused);
hrtime_t read_logs_starttime = gethrtime();
/* this is a no-op when we don't have space map logs */
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
error = space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_data(): failed at "
"space_map_open(obj=%llu) [error %d]",
(u_longlong_t)sls->sls_sm_obj, error);
goto out;
}
struct spa_ld_log_sm_arg vla = {
.slls_spa = spa,
.slls_txg = sls->sls_txg
};
error = space_map_iterate(sm, space_map_length(sm),
spa_ld_log_sm_cb, &vla);
if (error != 0) {
space_map_close(sm);
spa_load_failed(spa, "spa_ld_log_sm_data(): failed "
"at space_map_iterate(obj=%llu) [error %d]",
(u_longlong_t)sls->sls_sm_obj, error);
goto out;
}
ASSERT0(sls->sls_nblocks);
sls->sls_nblocks = space_map_nblocks(sm);
spa->spa_unflushed_stats.sus_nblocks += sls->sls_nblocks;
summary_add_data(spa, sls->sls_txg,
sls->sls_mscount, sls->sls_nblocks);
space_map_close(sm);
}
hrtime_t read_logs_endtime = gethrtime();
spa_load_note(spa,
"read %llu log space maps (%llu total blocks - blksz = %llu bytes) "
"in %lld ms", (u_longlong_t)avl_numnodes(&spa->spa_sm_logs_by_txg),
(u_longlong_t)spa_log_sm_nblocks(spa),
(u_longlong_t)zfs_log_sm_blksz,
(longlong_t)((read_logs_endtime - read_logs_starttime) / 1000000));
out:
/*
* Now that the metaslabs contain their unflushed changes:
* [1] recalculate their actual allocated space
* [2] recalculate their weights
* [3] sum up the memory usage of their unflushed range trees
* [4] optionally load them, if debug_load is set
*
* Note that even in the case where we get here because of an
* error (e.g. error != 0), we still want to update the fields
* below in order to have a proper teardown in spa_unload().
*/
for (metaslab_t *m = avl_first(&spa->spa_metaslabs_by_flushed);
m != NULL; m = AVL_NEXT(&spa->spa_metaslabs_by_flushed, m)) {
mutex_enter(&m->ms_lock);
m->ms_allocated_space = space_map_allocated(m->ms_sm) +
range_tree_space(m->ms_unflushed_allocs) -
range_tree_space(m->ms_unflushed_frees);
vdev_t *vd = m->ms_group->mg_vd;
metaslab_space_update(vd, m->ms_group->mg_class,
range_tree_space(m->ms_unflushed_allocs), 0, 0);
metaslab_space_update(vd, m->ms_group->mg_class,
-range_tree_space(m->ms_unflushed_frees), 0, 0);
ASSERT0(m->ms_weight & METASLAB_ACTIVE_MASK);
metaslab_recalculate_weight_and_sort(m);
spa->spa_unflushed_stats.sus_memused +=
metaslab_unflushed_changes_memused(m);
if (metaslab_debug_load && m->ms_sm != NULL) {
VERIFY0(metaslab_load(m));
metaslab_set_selected_txg(m, 0);
}
mutex_exit(&m->ms_lock);
}
return (error);
}
static int
spa_ld_unflushed_txgs(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
if (vd->vdev_top_zap == 0)
return (0);
uint64_t object = 0;
int error = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (uint64_t), 1, &object);
if (error == ENOENT)
return (0);
else if (error != 0) {
spa_load_failed(spa, "spa_ld_unflushed_txgs(): failed at "
"zap_lookup(vdev_top_zap=%llu) [error %d]",
(u_longlong_t)vd->vdev_top_zap, error);
return (error);
}
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *ms = vd->vdev_ms[m];
ASSERT(ms != NULL);
metaslab_unflushed_phys_t entry;
uint64_t entry_size = sizeof (entry);
uint64_t entry_offset = ms->ms_id * entry_size;
error = dmu_read(mos, object,
entry_offset, entry_size, &entry, 0);
if (error != 0) {
spa_load_failed(spa, "spa_ld_unflushed_txgs(): "
"failed at dmu_read(obj=%llu) [error %d]",
(u_longlong_t)object, error);
return (error);
}
ms->ms_unflushed_txg = entry.msp_unflushed_txg;
if (ms->ms_unflushed_txg != 0) {
mutex_enter(&spa->spa_flushed_ms_lock);
avl_add(&spa->spa_metaslabs_by_flushed, ms);
mutex_exit(&spa->spa_flushed_ms_lock);
}
}
return (0);
}
/*
* Read all the log space map entries into their respective
* metaslab unflushed trees and keep them sorted by TXG in the
* SPA's metadata. In addition, set up all the metadata for the
* memory and the block heuristics.
*/
int
spa_ld_log_spacemaps(spa_t *spa)
{
int error;
spa_log_sm_set_blocklimit(spa);
for (uint64_t c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[c];
error = spa_ld_unflushed_txgs(vd);
if (error != 0)
return (error);
}
error = spa_ld_log_sm_metadata(spa);
if (error != 0)
return (error);
/*
* Note: we don't actually expect anything to change at this point
* but we grab the config lock so we don't fail any assertions
* when using vdev_lookup_top().
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
error = spa_ld_log_sm_data(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
return (error);
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_max_mem_amt, ULONG, ZMOD_RW,
"Specific hard-limit in memory that ZFS allows to be used for "
"unflushed changes");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_max_mem_ppm, ULONG, ZMOD_RW,
"Percentage of the overall system memory that ZFS allows to be "
"used for unflushed changes (value is calculated over 1000000 for "
"finer granularity)");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_block_max, ULONG, ZMOD_RW,
"Hard limit (upper-bound) in the size of the space map log "
"in terms of blocks.");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_block_min, ULONG, ZMOD_RW,
"Lower-bound limit for the maximum amount of blocks allowed in "
"log spacemap (see zfs_unflushed_log_block_max)");
ZFS_MODULE_PARAM(zfs, zfs_, unflushed_log_block_pct, ULONG, ZMOD_RW,
"Tunable used to determine the number of blocks that can be used for "
"the spacemap log, expressed as a percentage of the total number of "
"metaslabs in the pool (e.g. 400 means the number of log blocks is "
"capped at 4 times the number of metaslabs)");
ZFS_MODULE_PARAM(zfs, zfs_, max_log_walking, ULONG, ZMOD_RW,
"The number of past TXGs that the flushing algorithm of the log "
"spacemap feature uses to estimate incoming log blocks");
ZFS_MODULE_PARAM(zfs, zfs_, max_logsm_summary_length, ULONG, ZMOD_RW,
"Maximum number of rows allowed in the summary of the spacemap log");
ZFS_MODULE_PARAM(zfs, zfs_, min_metaslabs_to_flush, ULONG, ZMOD_RW,
"Minimum number of metaslabs to flush per dirty TXG");
ZFS_MODULE_PARAM(zfs, zfs_, keep_log_spacemaps_at_export, INT, ZMOD_RW,
"Prevent the log spacemaps from being flushed and destroyed "
"during pool export/destroy");
/* END CSTYLED */
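/*
 * Illustrative sketch, not part of this patch: one plausible way the two
 * memory tunables above combine into an effective ceiling for unflushed
 * changes.  The helper name is hypothetical, the exact combination is an
 * assumption for illustration only, and the tunable variables are the
 * ones declared earlier in this file for the module parameters above.
 */
static inline uint64_t
example_unflushed_mem_limit(uint64_t sys_mem_bytes)
{
/* Scale system memory by parts-per-million, then apply the hard cap. */
uint64_t scaled = sys_mem_bytes / 1000000 * zfs_unflushed_max_mem_ppm;
return (MIN(scaled, zfs_unflushed_max_mem_amt));
}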
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index 58039f3d103c..1ecd2294dba0 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -1,2958 +1,2963 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>
/*
* SPA locking
*
* There are three basic locks for managing spa_t structures:
*
* spa_namespace_lock (global mutex)
*
* This lock must be acquired to do any of the following:
*
* - Lookup a spa_t by name
* - Add or remove a spa_t from the namespace
* - Increase spa_refcount from non-zero
* - Check if spa_refcount is zero
* - Rename a spa_t
* - add/remove/attach/detach devices
* - Held for the duration of create/destroy/import/export
*
* It does not need to handle recursion. A create or destroy may
* reference objects (files or zvols) in other pools, but by
* definition they must have an existing reference, and will never need
* to lookup a spa_t by name.
*
* spa_refcount (per-spa zfs_refcount_t protected by mutex)
*
* This reference count keeps track of any active users of the spa_t. The
* spa_t cannot be destroyed or freed while this is non-zero. Internally,
* the refcount is never really 'zero' - opening a pool implicitly keeps
* some references in the DMU. Internally we check against spa_minref, but
* present the image of a zero/non-zero value to consumers.
*
* spa_config_lock[] (per-spa array of rwlocks)
*
* This protects the spa_t from config changes, and must be held in
* the following circumstances:
*
* - RW_READER to perform I/O to the spa
* - RW_WRITER to change the vdev config
*
* The locking order is fairly straightforward:
*
* spa_namespace_lock -> spa_refcount
*
* The namespace lock must be acquired to increase the refcount from 0
* or to check if it is zero.
*
* spa_refcount -> spa_config_lock[]
*
* There must be at least one valid reference on the spa_t to acquire
* the config lock.
*
* spa_namespace_lock -> spa_config_lock[]
*
* The namespace lock must always be taken before the config lock.
*
*
* The spa_namespace_lock can be acquired directly and is globally visible.
*
* The namespace is manipulated using the following functions, all of which
* require the spa_namespace_lock to be held.
*
* spa_lookup() Lookup a spa_t by name.
*
* spa_add() Create a new spa_t in the namespace.
*
* spa_remove() Remove a spa_t from the namespace. This also
* frees up any memory associated with the spa_t.
*
* spa_next() Returns the next spa_t in the system, or the
* first if NULL is passed.
*
* spa_evict_all() Shutdown and remove all spa_t structures in
* the system.
*
* spa_guid_exists() Determine whether a pool/device guid exists.
*
* The spa_refcount is manipulated using the following functions:
*
* spa_open_ref() Adds a reference to the given spa_t. Must be
* called with spa_namespace_lock held if the
* refcount is currently zero.
*
* spa_close() Remove a reference from the spa_t. This will
* not free the spa_t or remove it from the
* namespace. No locking is required.
*
* spa_refcount_zero() Returns true if the refcount is currently
* zero. Must be called with spa_namespace_lock
* held.
*
* The spa_config_lock[] is an array of rwlocks, ordered as follows:
* SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
* spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
*
* To read the configuration, it suffices to hold one of these locks as reader.
* To modify the configuration, you must hold all locks as writer. To modify
* vdev state without altering the vdev tree's topology (e.g. online/offline),
* you must hold SCL_STATE and SCL_ZIO as writer.
*
* We use these distinct config locks to avoid recursive lock entry.
* For example, spa_sync() (which holds SCL_CONFIG as reader) induces
* block allocations (SCL_ALLOC), which may require reading space maps
* from disk (dmu_read() -> zio_read() -> SCL_ZIO).
*
* The spa config locks cannot be normal rwlocks because we need the
* ability to hand off ownership. For example, SCL_ZIO is acquired
* by the issuing thread and later released by an interrupt thread.
* They do, however, obey the usual write-wanted semantics to prevent
* writer (i.e. system administrator) starvation.
*
* The lock acquisition rules are as follows:
*
* SCL_CONFIG
* Protects changes to the vdev tree topology, such as vdev
* add/remove/attach/detach. Protects the dirty config list
* (spa_config_dirty_list) and the set of spares and l2arc devices.
*
* SCL_STATE
* Protects changes to pool state and vdev state, such as vdev
* online/offline/fault/degrade/clear. Protects the dirty state list
* (spa_state_dirty_list) and global pool state (spa_state).
*
* SCL_ALLOC
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_alloc() and metaslab_claim().
*
* SCL_ZIO
* Held by bp-level zios (those which have no io_vd upon entry)
* to prevent changes to the vdev tree. The bp-level zio implicitly
* protects all of its vdev child zios, which do not hold SCL_ZIO.
*
* SCL_FREE
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_free(). SCL_FREE is distinct from
* SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
* blocks in zio_done() while another i/o that holds either
* SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
*
* SCL_VDEV
* Held as reader to prevent changes to the vdev tree during trivial
* inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
* other locks, and lower than all of them, to ensure that it's safe
* to acquire regardless of caller context.
*
* In addition, the following rules apply:
*
* (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
* The lock ordering is SCL_CONFIG > spa_props_lock.
*
* (b) I/O operations on leaf vdevs. For any zio operation that takes
* an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
* or zio_write_phys() -- the caller must ensure that the config cannot
* change in the interim, and that the vdev cannot be reopened.
* SCL_STATE as reader suffices for both.
*
* The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
*
* spa_vdev_enter() Acquire the namespace lock and the config lock
* for writing.
*
* spa_vdev_exit() Release the config lock, wait for all I/O
* to complete, sync the updated configs to the
* cache, and release the namespace lock.
*
* vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
* Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
* locking is, always, based on spa_namespace_lock and spa_config_lock[].
*/
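/*
 * Illustrative sketch, not part of this patch: the acquisition pattern
 * described above.  The example_* names are hypothetical; FTAG and the
 * lock symbols are the ones used throughout this file.
 */
static void
example_config_reader(spa_t *spa)
{
/* A reader holds one config lock while inspecting the vdev tree. */
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
/* ... e.g. walk spa->spa_root_vdev ... */
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static void
example_config_writer(spa_t *spa)
{
/* Namespace lock before config locks, per the ordering above. */
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/* ... modify the vdev tree ... */
spa_config_exit(spa, SCL_ALL, FTAG);
mutex_exit(&spa_namespace_lock);
}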
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
int spa_max_replication_override = SPA_DVAS_PER_BP;
static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;
kmem_cache_t *spa_buffer_pool;
spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
#ifdef ZFS_DEBUG
/*
* Everything except dprintf, set_error, spa, and indirect_remap is on
* by default in debug builds.
*/
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif
/*
* zfs_recover can be set to nonzero to attempt to recover from
* otherwise-fatal errors, typically caused by on-disk corruption. When
* set, calls to zfs_panic_recover() will turn into warning messages.
* This should only be used as a last resort, as it typically results
* in leaked space, or worse.
*/
int zfs_recover = B_FALSE;
/*
* If destroy encounters an EIO while reading metadata (e.g. indirect
* blocks), space referenced by the missing metadata can not be freed.
* Normally this causes the background destroy to become "stalled", as
* it is unable to make forward progress. While in this stalled state,
* all remaining space to free from the error-encountering filesystem is
* "temporarily leaked". Set this flag to cause it to ignore the EIO,
* permanently leak the space from indirect blocks that can not be read,
* and continue to free everything else that it can.
*
* The default, "stalling" behavior is useful if the storage partially
* fails (i.e. some but not all i/os fail), and then later recovers. In
* this case, we will be able to continue pool operations while it is
* partially failed, and when it recovers, we can continue to free the
* space, with no leaks. However, note that this case is actually
* fairly rare.
*
* Typically pools either (a) fail completely (but perhaps temporarily,
* e.g. a top-level vdev going offline), or (b) have localized,
* permanent errors (e.g. disk returns the wrong data due to bit flip or
* firmware bug). In case (a), this setting does not matter because the
* pool will be suspended and the sync thread will not be able to make
* forward progress regardless. In case (b), because the error is
* permanent, the best we can do is leak the minimum amount of space,
* which is what setting this flag will do. Therefore, it is reasonable
* for this flag to normally be set, but we chose the more conservative
* approach of not setting it, so that there is no possibility of
* leaking space in the "partial temporary" failure case.
*/
int zfs_free_leak_on_eio = B_FALSE;
/*
* Expiration time in milliseconds. This value has two meanings. First it is
* used to determine when the spa_deadman() logic should fire. By default the
* spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
* Secondly, the value determines if an I/O is considered "hung". Any I/O that
* has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
* in one of three behaviors controlled by zfs_deadman_failmode.
*/
unsigned long zfs_deadman_synctime_ms = 600000UL;
/*
* This value controls the maximum amount of time zio_wait() will block for an
* outstanding IO. By default this is 300 seconds at which point the "hung"
* behavior will be applied as described for zfs_deadman_synctime_ms.
*/
unsigned long zfs_deadman_ziotime_ms = 300000UL;
/*
* Check time in milliseconds. This defines the frequency at which we check
* for hung I/O.
*/
unsigned long zfs_deadman_checktime_ms = 60000UL;
/*
* By default the deadman is enabled.
*/
int zfs_deadman_enabled = 1;
/*
* Controls the behavior of the deadman when it detects a "hung" I/O.
* Valid values are zfs_deadman_failmode=<wait|continue|panic>.
*
* wait - Wait for the "hung" I/O (default)
* continue - Attempt to recover from a "hung" I/O
* panic - Panic the system
*/
char *zfs_deadman_failmode = "wait";
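/*
 * Illustrative timeline with the defaults above, not part of this patch:
 * the deadman check runs every 60 seconds; an outstanding I/O older than
 * 300 seconds is treated as "hung" and handled per zfs_deadman_failmode;
 * a spa_sync() that has not finished after 600 seconds fires spa_deadman().
 */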
/*
* The worst case is single-sector max-parity RAID-Z blocks, in which
* case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
* times the size; so just assume that. Add to this the fact that
* we can have up to 3 DVAs per bp, and one more factor of 2 because
* the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
* the worst case is:
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
*/
int spa_asize_inflation = 24;
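/*
 * Expanding the arithmetic above (editorial note, not part of this patch):
 * with VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3, the worst case
 * is (3 + 1) * 3 * 2 == 24.
 */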
/*
* Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
* the pool to be consumed (bounded by spa_max_slop). This ensures that we
* don't run the pool completely out of space, due to unaccounted changes (e.g.
* to the MOS). It also limits the worst-case time to allocate space. If we
* have less than this amount of free space, most ZPL operations (e.g. write,
* create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are
* also part of this 3.2% of space which can't be consumed by normal writes;
* the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
* log space.
*
* Certain operations (e.g. file removal, most administrative actions) can
* use half the slop space. They will only return ENOSPC if less than half
* the slop space is free. Typically, once the pool has less than the slop
* space free, the user will use these operations to free up space in the pool.
* These are the operations that call dsl_pool_adjustedsize() with the netfree
* argument set to TRUE.
*
* Operations that are almost guaranteed to free up space in the absence of
* a pool checkpoint can use up to three quarters of the slop space
* (e.g. zfs destroy).
*
* A very restricted set of operations are always permitted, regardless of
* the amount of free space. These are the operations that call
* dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
* increase in the amount of space used, it is possible to run the pool
* completely out of space, causing it to be permanently read-only.
*
* Note that on very small pools, the slop space will be larger than
* 3.2%, in an effort to have it be at least spa_min_slop (128MB),
* but we never allow it to be more than half the pool size.
*
* Further, on very large pools, the slop space will be smaller than
* 3.2%, to avoid reserving much more space than we actually need; bounded
* by spa_max_slop (128GB).
*
* See also the comments in zfs_space_check_t.
*/
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128ULL * 1024 * 1024;
uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
int spa_allocators = 4;
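/*
 * Illustrative sketch, not part of this patch: the raw clamp described
 * above, before the embedded log and dedup adjustments that the real
 * spa_get_slop_space() below applies.  The helper name is hypothetical.
 */
static inline uint64_t
example_raw_slop(uint64_t pool_bytes)
{
uint64_t slop = pool_bytes >> spa_slop_shift; /* ~3.2% of the pool */
slop = MIN(slop, spa_max_slop); /* cap on very large pools */
return (MAX(slop, MIN(pool_bytes >> 1, spa_min_slop))); /* floor on small pools */
}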
-/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
-/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
/*
* By default dedup and user data indirects land in the special class
*/
int zfs_ddt_data_is_special = B_TRUE;
int zfs_user_indirect_is_special = B_TRUE;
/*
* The percentage of special class final space reserved for metadata only.
* Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
* let metadata into the class.
*/
int zfs_special_class_metadata_reserve_pct = 25;
/*
* ==========================================================================
* SPA config locking
* ==========================================================================
*/
static void
spa_config_lock_init(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
scl->scl_writer = NULL;
scl->scl_write_wanted = 0;
scl->scl_count = 0;
}
}
static void
spa_config_lock_destroy(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
ASSERT(scl->scl_writer == NULL);
ASSERT(scl->scl_write_wanted == 0);
ASSERT(scl->scl_count == 0);
}
}
int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
if (scl->scl_writer || scl->scl_write_wanted) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
} else {
ASSERT(scl->scl_writer != curthread);
if (scl->scl_count != 0) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
scl->scl_writer = curthread;
}
scl->scl_count++;
mutex_exit(&scl->scl_lock);
}
return (1);
}
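/*
 * Illustrative sketch, not part of this patch: non-blocking acquisition
 * via spa_config_tryenter(), which returns 0 (after dropping whatever it
 * had already taken) rather than wait.  The helper name is hypothetical.
 */
static boolean_t
example_try_reader(spa_t *spa)
{
if (!spa_config_tryenter(spa, SCL_VDEV, FTAG, RW_READER))
return (B_FALSE); /* a writer holds or wants the lock */
/* ... quick inspection that must not block ... */
spa_config_exit(spa, SCL_VDEV, FTAG);
return (B_TRUE);
}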
void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
int wlocks_held = 0;
ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (scl->scl_writer == curthread)
wlocks_held |= (1 << i);
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
while (scl->scl_writer || scl->scl_write_wanted) {
cv_wait(&scl->scl_cv, &scl->scl_lock);
}
} else {
ASSERT(scl->scl_writer != curthread);
while (scl->scl_count != 0) {
scl->scl_write_wanted++;
cv_wait(&scl->scl_cv, &scl->scl_lock);
scl->scl_write_wanted--;
}
scl->scl_writer = curthread;
}
scl->scl_count++;
mutex_exit(&scl->scl_lock);
}
ASSERT3U(wlocks_held, <=, locks);
}
void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
for (int i = SCL_LOCKS - 1; i >= 0; i--) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
ASSERT(scl->scl_count > 0);
if (--scl->scl_count == 0) {
ASSERT(scl->scl_writer == NULL ||
scl->scl_writer == curthread);
scl->scl_writer = NULL; /* OK in either case */
cv_broadcast(&scl->scl_cv);
}
mutex_exit(&scl->scl_lock);
}
}
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
int locks_held = 0;
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
if ((rw == RW_READER && scl->scl_count != 0) ||
(rw == RW_WRITER && scl->scl_writer == curthread))
locks_held |= 1 << i;
}
return (locks_held);
}
/*
* ==========================================================================
* SPA namespace functions
* ==========================================================================
*/
/*
* Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
* Returns NULL if no matching spa_t is found.
*/
spa_t *
spa_lookup(const char *name)
{
static spa_t search; /* spa_t is large; don't allocate on stack */
spa_t *spa;
avl_index_t where;
char *cp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
/*
* If it's a full dataset name, figure out the pool name and
* just use that.
*/
cp = strpbrk(search.spa_name, "/@#");
if (cp != NULL)
*cp = '\0';
spa = avl_find(&spa_namespace_avl, &search, &where);
return (spa);
}
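/*
 * Illustrative sketch, not part of this patch: because spa_lookup()
 * truncates at the first '/', '@', or '#', a full dataset or snapshot
 * name resolves to its pool.  The pool name is hypothetical.
 */
static spa_t *
example_lookup_by_dataset(void)
{
spa_t *spa;
mutex_enter(&spa_namespace_lock);
spa = spa_lookup("tank/home@snap"); /* same result as spa_lookup("tank") */
mutex_exit(&spa_namespace_lock);
return (spa);
}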
/*
* Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
* If the zfs_deadman_enabled flag is set then it inspects all vdev queues
* looking for potentially hung I/Os.
*/
void
spa_deadman(void *arg)
{
spa_t *spa = arg;
/* Disable the deadman if the pool is suspended. */
if (spa_suspended(spa))
return;
zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
(gethrtime() - spa->spa_sync_starttime) / NANOSEC,
(u_longlong_t)++spa->spa_deadman_calls);
if (zfs_deadman_enabled)
vdev_deadman(spa->spa_root_vdev, FTAG);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
MSEC_TO_TICK(zfs_deadman_checktime_ms));
}
static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
const spa_log_sm_t *a = va;
const spa_log_sm_t *b = vb;
return (TREE_CMP(a->sls_txg, b->sls_txg));
}
/*
* Create an uninitialized spa_t with the given name. Requires
* spa_namespace_lock. The caller must ensure that the spa_t doesn't already
* exist by calling spa_lookup() first.
*/
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
spa_t *spa;
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < TXG_SIZE; t++)
bplist_create(&spa->spa_free_bplist[t]);
(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
spa->spa_state = POOL_STATE_UNINITIALIZED;
spa->spa_freeze_txg = UINT64_MAX;
spa->spa_final_txg = UINT64_MAX;
spa->spa_load_max_txg = UINT64_MAX;
spa->spa_proc = &p0;
spa->spa_proc_state = SPA_PROC_NONE;
spa->spa_trust_config = B_TRUE;
spa->spa_hostid = zone_get_hostid(NULL);
spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
spa_set_deadman_failmode(spa, zfs_deadman_failmode);
zfs_refcount_create(&spa->spa_refcount);
spa_config_lock_init(spa);
spa_stats_init(spa);
avl_add(&spa_namespace_avl, spa);
/*
* Set the alternate root, if there is one.
*/
if (altroot)
spa->spa_root = spa_strdup(altroot);
spa->spa_alloc_count = spa_allocators;
spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
sizeof (spa_alloc_t), KM_SLEEP);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
NULL);
avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
sizeof (zio_t), offsetof(zio_t, io_alloc_node));
}
avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
offsetof(log_summary_entry_t, lse_node));
/*
* Every pool starts with the default cachefile
*/
list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
offsetof(spa_config_dirent_t, scd_link));
dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
list_insert_head(&spa->spa_config_list, dp);
VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
if (config != NULL) {
nvlist_t *features;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) == 0) {
VERIFY(nvlist_dup(features, &spa->spa_label_features,
0) == 0);
}
VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
}
if (spa->spa_label_features == NULL) {
VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
}
spa->spa_min_ashift = INT_MAX;
spa->spa_max_ashift = 0;
spa->spa_min_alloc = INT_MAX;
/* Reset cached value */
spa->spa_dedup_dspace = ~0ULL;
/*
* As a pool is being created, treat all features as disabled by
* setting SPA_FEATURE_DISABLED for all entries in the feature
* refcount cache.
*/
for (int i = 0; i < SPA_FEATURES; i++) {
spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
}
list_create(&spa->spa_leaf_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_leaf_node));
return (spa);
}
/*
* Removes a spa_t from the namespace, freeing up any memory used. Requires
* spa_namespace_lock. This is called only after the spa_t has been closed and
* deactivated.
*/
void
spa_remove(spa_t *spa)
{
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
ASSERT0(spa->spa_waiters);
nvlist_free(spa->spa_config_splitting);
avl_remove(&spa_namespace_avl, spa);
cv_broadcast(&spa_namespace_cv);
if (spa->spa_root)
spa_strfree(spa->spa_root);
while ((dp = list_head(&spa->spa_config_list)) != NULL) {
list_remove(&spa->spa_config_list, dp);
if (dp->scd_path != NULL)
spa_strfree(dp->scd_path);
kmem_free(dp, sizeof (spa_config_dirent_t));
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
avl_destroy(&spa->spa_allocs[i].spaa_tree);
mutex_destroy(&spa->spa_allocs[i].spaa_lock);
}
kmem_free(spa->spa_allocs, spa->spa_alloc_count *
sizeof (spa_alloc_t));
avl_destroy(&spa->spa_metaslabs_by_flushed);
avl_destroy(&spa->spa_sm_logs_by_txg);
list_destroy(&spa->spa_log_summary);
list_destroy(&spa->spa_config_list);
list_destroy(&spa->spa_leaf_list);
nvlist_free(spa->spa_label_features);
nvlist_free(spa->spa_load_info);
nvlist_free(spa->spa_feat_stats);
spa_config_set(spa, NULL);
zfs_refcount_destroy(&spa->spa_refcount);
spa_stats_destroy(spa);
spa_config_lock_destroy(spa);
for (int t = 0; t < TXG_SIZE; t++)
bplist_destroy(&spa->spa_free_bplist[t]);
zio_checksum_templates_free(spa);
cv_destroy(&spa->spa_async_cv);
cv_destroy(&spa->spa_evicting_os_cv);
cv_destroy(&spa->spa_proc_cv);
cv_destroy(&spa->spa_scrub_io_cv);
cv_destroy(&spa->spa_suspend_cv);
cv_destroy(&spa->spa_activities_cv);
cv_destroy(&spa->spa_waiters_cv);
mutex_destroy(&spa->spa_flushed_ms_lock);
mutex_destroy(&spa->spa_async_lock);
mutex_destroy(&spa->spa_errlist_lock);
mutex_destroy(&spa->spa_errlog_lock);
mutex_destroy(&spa->spa_evicting_os_lock);
mutex_destroy(&spa->spa_history_lock);
mutex_destroy(&spa->spa_proc_lock);
mutex_destroy(&spa->spa_props_lock);
mutex_destroy(&spa->spa_cksum_tmpls_lock);
mutex_destroy(&spa->spa_scrub_lock);
mutex_destroy(&spa->spa_suspend_lock);
mutex_destroy(&spa->spa_vdev_top_lock);
mutex_destroy(&spa->spa_feat_stats_lock);
mutex_destroy(&spa->spa_activities_lock);
kmem_free(spa, sizeof (spa_t));
}
/*
* Given a pool, return the next pool in the namespace, or NULL if there is
* none. If 'prev' is NULL, return the first pool.
*/
spa_t *
spa_next(spa_t *prev)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (prev)
return (AVL_NEXT(&spa_namespace_avl, prev));
else
return (avl_first(&spa_namespace_avl));
}
/*
* ==========================================================================
* SPA refcount functions
* ==========================================================================
*/
/*
* Add a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_open_ref(spa_t *spa, void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_add(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_close(spa_t *spa, void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t held by a dsl dir that is
* being asynchronously released. Async releases occur from a taskq
* performing eviction of dsl datasets and dirs. The namespace lock
* isn't held and the hold by the object being evicted may contribute to
* spa_minref (e.g. dataset or directory released during pool export),
* so the asserts in spa_close() do not apply.
*/
void
spa_async_close(spa_t *spa, void *tag)
{
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
/*
* Check to see if the spa refcount is zero. Must be called with
* spa_namespace_lock held. We really compare against spa_minref, which is the
* number of references acquired when opening a pool.
*/
boolean_t
spa_refcount_zero(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
/*
* ==========================================================================
* SPA spare and l2cache tracking
* ==========================================================================
*/
/*
* Hot spares and cache devices are tracked using the same code below,
* for 'auxiliary' devices.
*/
typedef struct spa_aux {
uint64_t aux_guid;
uint64_t aux_pool;
avl_node_t aux_avl;
int aux_count;
} spa_aux_t;
static inline int
spa_aux_compare(const void *a, const void *b)
{
const spa_aux_t *sa = (const spa_aux_t *)a;
const spa_aux_t *sb = (const spa_aux_t *)b;
return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}
static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
avl_index_t where;
spa_aux_t search;
spa_aux_t *aux;
search.aux_guid = vd->vdev_guid;
if ((aux = avl_find(avl, &search, &where)) != NULL) {
aux->aux_count++;
} else {
aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
aux->aux_guid = vd->vdev_guid;
aux->aux_count = 1;
avl_insert(avl, aux, where);
}
}
static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search;
spa_aux_t *aux;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
aux = avl_find(avl, &search, &where);
ASSERT(aux != NULL);
if (--aux->aux_count == 0) {
avl_remove(avl, aux);
kmem_free(aux, sizeof (spa_aux_t));
} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
aux->aux_pool = 0ULL;
}
}
static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
spa_aux_t search, *found;
search.aux_guid = guid;
found = avl_find(avl, &search, NULL);
if (pool) {
if (found)
*pool = found->aux_pool;
else
*pool = 0ULL;
}
if (refcnt) {
if (found)
*refcnt = found->aux_count;
else
*refcnt = 0;
}
return (found != NULL);
}
static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search, *found;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
found = avl_find(avl, &search, &where);
ASSERT(found != NULL);
ASSERT(found->aux_pool == 0ULL);
found->aux_pool = spa_guid(vd->vdev_spa);
}
/*
* Spares are tracked globally due to the following constraints:
*
* - A spare may be part of multiple pools.
* - A spare may be added to a pool even if it's actively in use within
* another pool.
* - A spare in use in any pool can only be the source of a replacement if
* the target is a spare in the same pool.
*
* We keep track of all spares on the system through the use of a reference
* counted AVL tree. When a vdev is added as a spare, or used as a replacement
* spare, then we bump the reference count in the AVL tree. In addition, we set
* the 'vdev_isspare' member to indicate that the device is a spare (active or
* inactive). When a spare is made active (used to replace a device in the
* pool), we also keep track of which pool it's been made a part of.
*
* The 'spa_spare_lock' protects the AVL tree. These functions are normally
* called under the spa_namespace lock as part of vdev reconfiguration. The
* separate spare lock exists for the status query path, which does not need to
* be completely consistent with respect to other vdev configuration changes.
*/
static int
spa_spare_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_spare_add(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(!vd->vdev_isspare);
spa_aux_add(vd, &spa_spare_avl);
vd->vdev_isspare = B_TRUE;
mutex_exit(&spa_spare_lock);
}
void
spa_spare_remove(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_remove(vd, &spa_spare_avl);
vd->vdev_isspare = B_FALSE;
mutex_exit(&spa_spare_lock);
}
boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
boolean_t found;
mutex_enter(&spa_spare_lock);
found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
return (found);
}
void
spa_spare_activate(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_activate(vd, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
}
/*
* Level 2 ARC devices are tracked globally for the same reasons as spares.
* Cache devices currently only support one pool per cache device, and so
* for these devices the aux reference count is currently unused beyond 1.
*/
static int
spa_l2cache_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_l2cache_add(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(!vd->vdev_isl2cache);
spa_aux_add(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_TRUE;
mutex_exit(&spa_l2cache_lock);
}
void
spa_l2cache_remove(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_remove(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_FALSE;
mutex_exit(&spa_l2cache_lock);
}
boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
boolean_t found;
mutex_enter(&spa_l2cache_lock);
found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
return (found);
}
void
spa_l2cache_activate(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_activate(vd, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
}
/*
* ==========================================================================
* SPA vdev locking
* ==========================================================================
*/
/*
* Lock the given spa_t for the purpose of adding or removing a vdev.
* Grabs the global spa_namespace_lock plus the spa config lock for writing.
* It returns the next transaction group for the spa_t.
*/
uint64_t
spa_vdev_enter(spa_t *spa)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
vdev_autotrim_stop_all(spa);
return (spa_vdev_config_enter(spa));
}
/*
* The same as spa_vdev_enter() above but additionally takes the guid of
* the vdev being detached. When there is a rebuild in process it will be
* suspended while the vdev tree is modified then resumed by spa_vdev_exit().
* The rebuild is canceled if only a single child remains after the detach.
*/
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
vdev_autotrim_stop_all(spa);
if (guid != 0) {
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd) {
vdev_rebuild_stop_wait(vd->vdev_top);
}
}
return (spa_vdev_config_enter(spa));
}
/*
* Internal implementation for spa_vdev_enter(). Used when a vdev
* operation requires multiple syncs (i.e. removing a device) while
* keeping the spa_namespace_lock held.
*/
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
return (spa_last_synced_txg(spa) + 1);
}
/*
* Used in combination with spa_vdev_config_enter() to allow the syncing
* of multiple transactions without releasing the spa_namespace_lock.
*/
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
int config_changed = B_FALSE;
ASSERT(txg > spa_last_synced_txg(spa));
spa->spa_pending_vdev = NULL;
/*
* Reassess the DTLs.
*/
vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);
if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
config_changed = B_TRUE;
spa->spa_config_generation++;
}
/*
* Verify the metaslab classes.
*/
ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
spa_config_exit(spa, SCL_ALL, spa);
/*
* Panic the system if the specified tag requires it. This
* is useful for ensuring that configurations are updated
* transactionally.
*/
if (zio_injection_enabled)
zio_handle_panic_injection(spa, tag, 0);
/*
* Note: this txg_wait_synced() is important because it ensures
* that there won't be more than one config change per txg.
* This allows us to use the txg as the generation number.
*/
if (error == 0)
txg_wait_synced(spa->spa_dsl_pool, txg);
if (vd != NULL) {
ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
if (vd->vdev_ops->vdev_op_leaf) {
mutex_enter(&vd->vdev_initialize_lock);
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
NULL);
mutex_exit(&vd->vdev_initialize_lock);
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
mutex_exit(&vd->vdev_trim_lock);
}
/*
* The vdev may be both a leaf and top-level device.
*/
vdev_autotrim_stop_wait(vd);
spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
vdev_free(vd);
spa_config_exit(spa, SCL_STATE_ALL, spa);
}
/*
* If the config changed, update the config cache.
*/
if (config_changed)
spa_write_cachefile(spa, B_FALSE, B_TRUE);
}
/*
* Unlock the spa_t after adding or removing a vdev. Besides undoing the
* locking of spa_vdev_enter(), we also want to make sure the transactions have
* synced to disk, and then update the global configuration cache with the new
* information.
*/
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
vdev_autotrim_restart(spa);
vdev_rebuild_restart(spa);
spa_vdev_config_exit(spa, vd, txg, error, FTAG);
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Lock the given spa_t for the purpose of changing vdev state.
*/
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
int locks = SCL_STATE_ALL | oplocks;
/*
* Root pools may need to read from the underlying devfs filesystem
* when opening up a vdev. Unfortunately if we're holding the
* SCL_ZIO lock it will result in a deadlock when we try to issue
* the read from the root filesystem. Instead we "prefetch"
* the associated vnodes that we need prior to opening the
* underlying devices and cache them so that we can prevent
* any I/O when we are doing the actual open.
*/
if (spa_is_root(spa)) {
int low = locks & ~(SCL_ZIO - 1);
int high = locks & ~low;
spa_config_enter(spa, high, spa, RW_WRITER);
vdev_hold(spa->spa_root_vdev);
spa_config_enter(spa, low, spa, RW_WRITER);
} else {
spa_config_enter(spa, locks, spa, RW_WRITER);
}
spa->spa_vdev_locks = locks;
}
int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
boolean_t config_changed = B_FALSE;
vdev_t *vdev_top;
if (vd == NULL || vd == spa->spa_root_vdev) {
vdev_top = spa->spa_root_vdev;
} else {
vdev_top = vd->vdev_top;
}
if (vd != NULL || error == 0)
vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);
if (vd != NULL) {
if (vd != spa->spa_root_vdev)
vdev_state_dirty(vdev_top);
config_changed = B_TRUE;
spa->spa_config_generation++;
}
if (spa_is_root(spa))
vdev_rele(spa->spa_root_vdev);
ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
spa_config_exit(spa, spa->spa_vdev_locks, spa);
/*
* If anything changed, wait for it to sync. This ensures that,
* from the system administrator's perspective, zpool(8) commands
* are synchronous. This is important for things like zpool offline:
* when the command completes, you expect no further I/O from ZFS.
*/
if (vd != NULL)
txg_wait_synced(spa->spa_dsl_pool, 0);
/*
* If the config changed, update the config cache.
*/
if (config_changed) {
mutex_enter(&spa_namespace_lock);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
mutex_exit(&spa_namespace_lock);
}
return (error);
}
/*
* ==========================================================================
* Miscellaneous functions
* ==========================================================================
*/
void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
if (!nvlist_exists(spa->spa_label_features, feature)) {
fnvlist_add_boolean(spa->spa_label_features, feature);
/*
* When we are creating the pool (tx_txg==TXG_INITIAL), we can't
* dirty the vdev config because lock SCL_CONFIG is not held.
* Thankfully, in this case we don't need to dirty the config
* because it will be written out anyway when we finish
* creating the pool.
*/
if (tx->tx_txg != TXG_INITIAL)
vdev_config_dirty(spa->spa_root_vdev);
}
}
void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
vdev_config_dirty(spa->spa_root_vdev);
}
/*
* Return the spa_t associated with given pool_guid, if it exists. If
* device_guid is non-zero, determine whether the pool exists *and* contains
* a device with the specified device_guid.
*/
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
spa_t *spa;
avl_tree_t *t = &spa_namespace_avl;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
continue;
if (spa->spa_root_vdev == NULL)
continue;
if (spa_guid(spa) == pool_guid) {
if (device_guid == 0)
break;
if (vdev_lookup_by_guid(spa->spa_root_vdev,
device_guid) != NULL)
break;
/*
* Check any devices we may be in the process of adding.
*/
if (spa->spa_pending_vdev) {
if (vdev_lookup_by_guid(spa->spa_pending_vdev,
device_guid) != NULL)
break;
}
}
}
return (spa);
}
/*
* Determine whether a pool with the given pool_guid exists.
*/
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
return (spa_by_guid(pool_guid, device_guid) != NULL);
}
char *
spa_strdup(const char *s)
{
size_t len;
char *new;
len = strlen(s);
new = kmem_alloc(len + 1, KM_SLEEP);
bcopy(s, new, len);
new[len] = '\0';
return (new);
}
void
spa_strfree(char *s)
{
kmem_free(s, strlen(s) + 1);
}
uint64_t
spa_generate_guid(spa_t *spa)
{
uint64_t guid;
if (spa != NULL) {
do {
(void) random_get_pseudo_bytes((void *)&guid,
sizeof (guid));
} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
} else {
do {
(void) random_get_pseudo_bytes((void *)&guid,
sizeof (guid));
} while (guid == 0 || spa_guid_exists(guid, 0));
}
return (guid);
}
void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
char type[256];
char *checksum = NULL;
char *compress = NULL;
if (bp != NULL) {
if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
(void) snprintf(type, sizeof (type), "bswap %s %s",
DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
"metadata" : "data",
dmu_ot_byteswap[bswap].ob_name);
} else {
(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
sizeof (type));
}
if (!BP_IS_EMBEDDED(bp)) {
checksum =
zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
}
compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
}
SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
compress);
}
void
spa_freeze(spa_t *spa)
{
uint64_t freeze_txg = 0;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
if (spa->spa_freeze_txg == UINT64_MAX) {
freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
spa->spa_freeze_txg = freeze_txg;
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (freeze_txg != 0)
txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}
void
zfs_panic_recover(const char *fmt, ...)
{
va_list adx;
va_start(adx, fmt);
vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
va_end(adx);
}
/*
* This is a stripped-down version of strtoull, suitable only for converting
* lowercase hexadecimal numbers that don't overflow.
*/
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
uint64_t val = 0;
char c;
int digit;
while ((c = *str) != '\0') {
if (c >= '0' && c <= '9')
digit = c - '0';
else if (c >= 'a' && c <= 'f')
digit = 10 + c - 'a';
else
break;
val *= 16;
val += digit;
str++;
}
if (nptr)
*nptr = (char *)str;
return (val);
}
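/*
 * Illustrative example, not part of this patch: zfs_strtonum() parses
 * lowercase hex and reports where it stopped.
 *
 *	char *end;
 *	uint64_t v = zfs_strtonum("1a2b3c/rest", &end);
 *	=> v == 0x1a2b3c, end points at "/rest"
 */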
void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
/*
* We bump the feature refcount for each special vdev added to the pool
*/
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}
/*
* ==========================================================================
* Accessor functions
* ==========================================================================
*/
boolean_t
spa_shutting_down(spa_t *spa)
{
return (spa->spa_async_suspended);
}
dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
return (spa->spa_dsl_pool);
}
boolean_t
spa_is_initializing(spa_t *spa)
{
return (spa->spa_is_initializing);
}
boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
return (spa->spa_indirect_vdevs_loaded);
}
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
return (&spa->spa_ubsync.ub_rootbp);
}
void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
spa->spa_uberblock.ub_rootbp = *bp;
}
void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
if (spa->spa_root == NULL)
buf[0] = '\0';
else
(void) strncpy(buf, spa->spa_root, buflen);
}
int
spa_sync_pass(spa_t *spa)
{
return (spa->spa_sync_pass);
}
char *
spa_name(spa_t *spa)
{
return (spa->spa_name);
}
uint64_t
spa_guid(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t guid;
/*
* If we fail to parse the config during spa_load(), we can go through
* the error path (which posts an ereport) and end up here with no root
* vdev. We stash the original pool guid in 'spa_config_guid' to handle
* this case.
*/
if (spa->spa_root_vdev == NULL)
return (spa->spa_config_guid);
guid = spa->spa_last_synced_guid != 0 ?
spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
/*
* Return the most recently synced out guid unless we're
* in syncing context.
*/
if (dp && dsl_pool_sync_context(dp))
return (spa->spa_root_vdev->vdev_guid);
else
return (guid);
}
uint64_t
spa_load_guid(spa_t *spa)
{
/*
* This is a GUID that exists solely as a reference for the
* purposes of the arc. It is generated at load time, and
* is never written to persistent storage.
*/
return (spa->spa_load_guid);
}
uint64_t
spa_last_synced_txg(spa_t *spa)
{
return (spa->spa_ubsync.ub_txg);
}
uint64_t
spa_first_txg(spa_t *spa)
{
return (spa->spa_first_txg);
}
uint64_t
spa_syncing_txg(spa_t *spa)
{
return (spa->spa_syncing_txg);
}
/*
* Return the last txg where data can be dirtied. The final txgs
* will be used to just clear out any deferred frees that remain.
*/
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
return (spa->spa_final_txg - TXG_DEFER_SIZE);
}
pool_state_t
spa_state(spa_t *spa)
{
return (spa->spa_state);
}
spa_load_state_t
spa_load_state(spa_t *spa)
{
return (spa->spa_load_state);
}
uint64_t
spa_freeze_txg(spa_t *spa)
{
return (spa->spa_freeze_txg);
}
/*
* Return the inflated asize for a logical write in bytes. This is used by the
* DMU to calculate the space a logical write will require on disk.
* If lsize is smaller than the largest physical block size allocatable on this
* pool we use its value instead, since the write will end up using the whole
* block anyway.
*/
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
if (lsize == 0)
return (0); /* No inflation needed */
return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}
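/*
 * Worked example, not part of this patch: with the default
 * spa_asize_inflation of 24 on a pool whose largest ashift is 12 (4 KiB
 * blocks), a 1 KiB logical write is charged MAX(1 KiB, 4 KiB) * 24 =
 * 96 KiB of worst-case space.
 */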
/*
* Return the amount of slop space in bytes. It is typically 1/32 of the pool
* (3.2%), minus the embedded log space. On very small pools, it may be
* slightly larger than this. On very large pools, it will be capped to
* the value of spa_max_slop. The embedded log space is not included in
* spa_dspace. By subtracting it, the usable space (per "zfs list") is a
* constant 97% of the total space, regardless of metaslab size (assuming the
* default spa_slop_shift=5 and a non-tiny pool).
*
* See the comment above spa_slop_shift for more details.
*/
uint64_t
spa_get_slop_space(spa_t *spa)
{
uint64_t space = 0;
uint64_t slop = 0;
/*
* Make sure spa_dedup_dspace has been set.
*/
if (spa->spa_dedup_dspace == ~0ULL)
spa_update_dspace(spa);
/*
* spa_get_dspace() includes the space only logically "used" by
* deduplicated data, so since it's not useful to reserve more
* space with more deduplicated data, we subtract that out here.
*/
space = spa_get_dspace(spa) - spa->spa_dedup_dspace;
slop = MIN(space >> spa_slop_shift, spa_max_slop);
/*
* Subtract the embedded log space, but no more than half the (3.2%)
* unusable space. Note, the "no more than half" is only relevant if
* zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
* default.
*/
uint64_t embedded_log =
metaslab_class_get_dspace(spa_embedded_log_class(spa));
slop -= MIN(embedded_log, slop >> 1);
/*
* Slop space should be at least spa_min_slop, but no more than half
* the entire pool.
*/
slop = MAX(slop, MIN(space >> 1, spa_min_slop));
return (slop);
}
uint64_t
spa_get_dspace(spa_t *spa)
{
return (spa->spa_dspace);
}
uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
return (spa->spa_checkpoint_info.sci_dspace);
}
void
spa_update_dspace(spa_t *spa)
{
spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
ddt_get_dedup_dspace(spa);
if (spa->spa_vdev_removal != NULL) {
/*
* We can't allocate from the removing device, so subtract
* its size if it was included in dspace (i.e. if this is a
* normal-class vdev, not special/dedup). This prevents the
* DMU/DSL from filling up the (now smaller) pool while we
* are in the middle of removing the device.
*
* Note that the DMU/DSL doesn't actually know or care
* how much space is allocated (it does its own tracking
* of how much space has been logically used). So it
* doesn't matter that the data we are moving may be
* allocated twice (on the old device and the new
* device).
*/
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vdev_t *vd =
vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
- if (vd->vdev_mg->mg_class == spa_normal_class(spa)) {
+ /*
+ * If the stars align, we can wind up here after
+ * vdev_remove_complete() has cleared vd->vdev_mg but before
+ * spa->spa_vdev_removal gets cleared, so we must check before
+ * we dereference.
+ */
+ if (vd->vdev_mg &&
+ vd->vdev_mg->mg_class == spa_normal_class(spa)) {
spa->spa_dspace -= spa_deflate(spa) ?
vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
}
/*
* Return the failure mode that has been set to this pool. The default
* behavior will be to block all I/Os when a complete failure occurs.
*/
uint64_t
spa_get_failmode(spa_t *spa)
{
return (spa->spa_failmode);
}
boolean_t
spa_suspended(spa_t *spa)
{
return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}
uint64_t
spa_version(spa_t *spa)
{
return (spa->spa_ubsync.ub_version);
}
boolean_t
spa_deflate(spa_t *spa)
{
return (spa->spa_deflate);
}
metaslab_class_t *
spa_normal_class(spa_t *spa)
{
return (spa->spa_normal_class);
}
metaslab_class_t *
spa_log_class(spa_t *spa)
{
return (spa->spa_log_class);
}
metaslab_class_t *
spa_embedded_log_class(spa_t *spa)
{
return (spa->spa_embedded_log_class);
}
metaslab_class_t *
spa_special_class(spa_t *spa)
{
return (spa->spa_special_class);
}
metaslab_class_t *
spa_dedup_class(spa_t *spa)
{
return (spa->spa_dedup_class);
}
/*
* Locate an appropriate allocation class
*/
metaslab_class_t *
spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
uint_t level, uint_t special_smallblk)
{
/*
* ZIL allocations determine their class in zio_alloc_zil().
*/
ASSERT(objtype != DMU_OT_INTENT_LOG);
boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
if (DMU_OT_IS_DDT(objtype)) {
if (spa->spa_dedup_class->mc_groups != 0)
return (spa_dedup_class(spa));
else if (has_special_class && zfs_ddt_data_is_special)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
/* Indirect blocks for user data can land in special if allowed */
if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
if (has_special_class && zfs_user_indirect_is_special)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
if (DMU_OT_IS_METADATA(objtype) || level > 0) {
if (has_special_class)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
/*
* Allow small file blocks in special class in some cases (like
* for the dRAID vdev feature). But always leave a reserve of
* zfs_special_class_metadata_reserve_pct exclusively for metadata.
*/
if (DMU_OT_IS_FILE(objtype) &&
has_special_class && size <= special_smallblk) {
metaslab_class_t *special = spa_special_class(spa);
uint64_t alloc = metaslab_class_get_alloc(special);
uint64_t space = metaslab_class_get_space(special);
uint64_t limit =
(space * (100 - zfs_special_class_metadata_reserve_pct))
/ 100;
if (alloc < limit)
return (special);
}
return (spa_normal_class(spa));
}
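/*
 * Worked example, not part of this patch: with the default
 * zfs_special_class_metadata_reserve_pct of 25 and a 1 TiB special
 * class, small file blocks are steered to the special class only while
 * its allocations are below 1 TiB * 75 / 100 = 768 GiB; past that they
 * fall back to the normal class.
 */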
void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_insert_head(&spa->spa_evicting_os_list, os);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_remove(&spa->spa_evicting_os_list, os);
cv_broadcast(&spa->spa_evicting_os_cv);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_wait(spa_t *spa)
{
mutex_enter(&spa->spa_evicting_os_lock);
while (!list_is_empty(&spa->spa_evicting_os_list))
cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
mutex_exit(&spa->spa_evicting_os_lock);
dmu_buf_user_evict_wait();
}
int
spa_max_replication(spa_t *spa)
{
/*
* As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
* handle BPs with more than one DVA allocated. Set our max
* replication level accordingly.
*/
if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
return (1);
return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}
int
spa_prev_software_version(spa_t *spa)
{
return (spa->spa_prev_software_version);
}
uint64_t
spa_deadman_synctime(spa_t *spa)
{
return (spa->spa_deadman_synctime);
}
spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
return (spa->spa_autotrim);
}
uint64_t
spa_deadman_ziotime(spa_t *spa)
{
return (spa->spa_deadman_ziotime);
}
uint64_t
spa_get_deadman_failmode(spa_t *spa)
{
return (spa->spa_deadman_failmode);
}
void
spa_set_deadman_failmode(spa_t *spa, const char *failmode)
{
if (strcmp(failmode, "wait") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
else if (strcmp(failmode, "continue") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
else if (strcmp(failmode, "panic") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
else
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
}
void
spa_set_deadman_ziotime(hrtime_t ns)
{
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_ziotime = ns;
mutex_exit(&spa_namespace_lock);
}
}
void
spa_set_deadman_synctime(hrtime_t ns)
{
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_synctime = ns;
mutex_exit(&spa_namespace_lock);
}
}
uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
uint64_t asize = DVA_GET_ASIZE(dva);
uint64_t dsize = asize;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (asize != 0 && spa->spa_deflate) {
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd != NULL)
dsize = (asize >> SPA_MINBLOCKSHIFT) *
vd->vdev_deflate_ratio;
}
return (dsize);
}
uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
return (dsize);
}
uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (dsize);
}
uint64_t
spa_dirty_data(spa_t *spa)
{
return (spa->spa_dsl_pool->dp_dirty_total);
}
/*
* ==========================================================================
* SPA Import Progress Routines
* ==========================================================================
*/
typedef struct spa_import_progress {
uint64_t pool_guid; /* unique id for updates */
char *pool_name;
spa_load_state_t spa_load_state;
uint64_t mmp_sec_remaining; /* MMP activity check */
uint64_t spa_load_max_txg; /* rewind txg */
procfs_list_node_t smh_node;
} spa_import_progress_t;
spa_history_list_t *spa_import_progress_list = NULL;
static int
spa_import_progress_show_header(struct seq_file *f)
{
seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid",
"load_state", "multihost_secs", "max_txg",
"pool_name");
return (0);
}
static int
spa_import_progress_show(struct seq_file *f, void *data)
{
spa_import_progress_t *sip = (spa_import_progress_t *)data;
seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n",
(u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
(u_longlong_t)sip->mmp_sec_remaining,
(u_longlong_t)sip->spa_load_max_txg,
(sip->pool_name ? sip->pool_name : "-"));
return (0);
}
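/*
 * Illustrative output (hypothetical values) as rendered by the two
 * callbacks above under the "import_progress" procfs/kstat node:
 *
 *   pool_guid            load_state     multihost_secs max_txg      pool_name
 *   9876543210987654321  3              0              0            tank
 */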
/* Remove oldest elements from list until there are no more than 'size' left */
static void
spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
{
spa_import_progress_t *sip;
while (shl->size > size) {
sip = list_remove_head(&shl->procfs_list.pl_list);
if (sip->pool_name)
spa_strfree(sip->pool_name);
kmem_free(sip, sizeof (spa_import_progress_t));
shl->size--;
}
IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
}
static void
spa_import_progress_init(void)
{
spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
KM_SLEEP);
spa_import_progress_list->size = 0;
spa_import_progress_list->procfs_list.pl_private =
spa_import_progress_list;
procfs_list_install("zfs",
NULL,
"import_progress",
0644,
&spa_import_progress_list->procfs_list,
spa_import_progress_show,
spa_import_progress_show_header,
NULL,
offsetof(spa_import_progress_t, smh_node));
}
static void
spa_import_progress_destroy(void)
{
spa_history_list_t *shl = spa_import_progress_list;
procfs_list_uninstall(&shl->procfs_list);
spa_import_progress_truncate(shl, 0);
procfs_list_destroy(&shl->procfs_list);
kmem_free(shl, sizeof (spa_history_list_t));
}
int
spa_import_progress_set_state(uint64_t pool_guid,
spa_load_state_t load_state)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->spa_load_state = load_state;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
int
spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->spa_load_max_txg = load_max_txg;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
int
spa_import_progress_set_mmp_check(uint64_t pool_guid,
uint64_t mmp_sec_remaining)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->mmp_sec_remaining = mmp_sec_remaining;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
/*
* A new import is in progress; add an entry.
*/
void
spa_import_progress_add(spa_t *spa)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
char *poolname = NULL;
sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
sip->pool_guid = spa_guid(spa);
(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
&poolname);
if (poolname == NULL)
poolname = spa_name(spa);
sip->pool_name = spa_strdup(poolname);
sip->spa_load_state = spa_load_state(spa);
mutex_enter(&shl->procfs_list.pl_lock);
procfs_list_add(&shl->procfs_list, sip);
shl->size++;
mutex_exit(&shl->procfs_list.pl_lock);
}
void
spa_import_progress_remove(uint64_t pool_guid)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
if (sip->pool_name)
spa_strfree(sip->pool_name);
list_remove(&shl->procfs_list.pl_list, sip);
shl->size--;
kmem_free(sip, sizeof (spa_import_progress_t));
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
}
/*
* ==========================================================================
* Initialization and Termination
* ==========================================================================
*/
static int
spa_name_compare(const void *a1, const void *a2)
{
const spa_t *s1 = a1;
const spa_t *s2 = a2;
int s;
s = strcmp(s1->spa_name, s2->spa_name);
return (TREE_ISIGN(s));
}
void
spa_boot_init(void)
{
spa_config_load();
}
void
spa_init(spa_mode_t mode)
{
mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
offsetof(spa_t, spa_avl));
avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
spa_mode_global = mode;
#ifndef _KERNEL
if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
struct sigaction sa;
sa.sa_flags = SA_SIGINFO;
sigemptyset(&sa.sa_mask);
sa.sa_sigaction = arc_buf_sigsegv;
if (sigaction(SIGSEGV, &sa, NULL) == -1) {
perror("could not enable watchpoints: "
"sigaction(SIGSEGV, ...) = ");
} else {
arc_watch = B_TRUE;
}
}
#endif
fm_init();
zfs_refcount_init();
unique_init();
zfs_btree_init();
metaslab_stat_init();
ddt_init();
zio_init();
dmu_init();
zil_init();
vdev_cache_stat_init();
vdev_mirror_stat_init();
vdev_raidz_math_init();
vdev_file_init();
zfs_prop_init();
zpool_prop_init();
zpool_feature_init();
spa_config_load();
l2arc_start();
scan_init();
qat_init();
spa_import_progress_init();
}
void
spa_fini(void)
{
l2arc_stop();
spa_evict_all();
vdev_file_fini();
vdev_cache_stat_fini();
vdev_mirror_stat_fini();
vdev_raidz_math_fini();
zil_fini();
dmu_fini();
zio_fini();
ddt_fini();
metaslab_stat_fini();
zfs_btree_fini();
unique_fini();
zfs_refcount_fini();
fm_fini();
scan_fini();
qat_fini();
spa_import_progress_destroy();
avl_destroy(&spa_namespace_avl);
avl_destroy(&spa_spare_avl);
avl_destroy(&spa_l2cache_avl);
cv_destroy(&spa_namespace_cv);
mutex_destroy(&spa_namespace_lock);
mutex_destroy(&spa_spare_lock);
mutex_destroy(&spa_l2cache_lock);
}
/*
* Return whether this pool has a dedicated slog device. No locking needed.
* It's not a problem if the wrong answer is returned as it's only for
* performance and not correctness.
*/
boolean_t
spa_has_slogs(spa_t *spa)
{
return (spa->spa_log_class->mc_groups != 0);
}
spa_log_state_t
spa_get_log_state(spa_t *spa)
{
return (spa->spa_log_state);
}
void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
spa->spa_log_state = state;
}
boolean_t
spa_is_root(spa_t *spa)
{
return (spa->spa_is_root);
}
boolean_t
spa_writeable(spa_t *spa)
{
return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
}
/*
* Returns true if there is a pending sync task in any of the current
* syncing txg, the current quiescing txg, or the current open txg.
*/
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
!txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}
spa_mode_t
spa_mode(spa_t *spa)
{
return (spa->spa_mode);
}
uint64_t
spa_bootfs(spa_t *spa)
{
return (spa->spa_bootfs);
}
uint64_t
spa_delegation(spa_t *spa)
{
return (spa->spa_delegation);
}
objset_t *
spa_meta_objset(spa_t *spa)
{
return (spa->spa_meta_objset);
}
enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
return (spa->spa_dedup_checksum);
}
/*
* Reset pool scan stat per scan pass (or reboot).
*/
void
spa_scan_stat_init(spa_t *spa)
{
/* data not stored on disk */
spa->spa_scan_pass_start = gethrestime_sec();
if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
else
spa->spa_scan_pass_scrub_pause = 0;
spa->spa_scan_pass_scrub_spent_paused = 0;
spa->spa_scan_pass_exam = 0;
spa->spa_scan_pass_issued = 0;
vdev_scan_stat_init(spa->spa_root_vdev);
}
/*
* Get scan stats for zpool status reports
*/
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
return (SET_ERROR(ENOENT));
bzero(ps, sizeof (pool_scan_stat_t));
/* data stored on disk */
ps->pss_func = scn->scn_phys.scn_func;
ps->pss_state = scn->scn_phys.scn_state;
ps->pss_start_time = scn->scn_phys.scn_start_time;
ps->pss_end_time = scn->scn_phys.scn_end_time;
ps->pss_to_examine = scn->scn_phys.scn_to_examine;
ps->pss_examined = scn->scn_phys.scn_examined;
ps->pss_to_process = scn->scn_phys.scn_to_process;
ps->pss_processed = scn->scn_phys.scn_processed;
ps->pss_errors = scn->scn_phys.scn_errors;
/* data not stored on disk */
ps->pss_pass_exam = spa->spa_scan_pass_exam;
ps->pss_pass_start = spa->spa_scan_pass_start;
ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
ps->pss_pass_issued = spa->spa_scan_pass_issued;
ps->pss_issued =
scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
return (0);
}
int
spa_maxblocksize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
return (SPA_MAXBLOCKSIZE);
else
return (SPA_OLD_MAXBLOCKSIZE);
}
/*
* Returns the txg in which the last device removal completed. No indirect mappings
* have been added since this txg.
*/
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
uint64_t vdevid;
uint64_t ret = -1ULL;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/*
* sr_prev_indirect_vdev is only modified while holding all the
* config locks, so it is sufficient to hold SCL_VDEV as reader when
* examining it.
*/
vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
while (vdevid != -1ULL) {
vdev_t *vd = vdev_lookup_top(spa, vdevid);
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
/*
* If the removal did not remap any data, we don't care.
*/
if (vdev_indirect_births_count(vib) != 0) {
ret = vdev_indirect_births_last_entry_txg(vib);
break;
}
vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
}
spa_config_exit(spa, SCL_VDEV, FTAG);
IMPLY(ret != -1ULL,
spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
return (ret);
}
int
spa_maxdnodesize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
return (DNODE_MAX_SIZE);
else
return (DNODE_MIN_SIZE);
}
boolean_t
spa_multihost(spa_t *spa)
{
return (spa->spa_multihost ? B_TRUE : B_FALSE);
}
uint32_t
spa_get_hostid(spa_t *spa)
{
return (spa->spa_hostid);
}
boolean_t
spa_trust_config(spa_t *spa)
{
return (spa->spa_trust_config);
}
uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
return (spa->spa_missing_tvds_allowed);
}
space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
return (spa->spa_syncing_log_sm);
}
void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
spa->spa_missing_tvds = missing;
}
/*
* Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc.).
*/
const char *
spa_state_to_name(spa_t *spa)
{
ASSERT3P(spa, !=, NULL);
/*
* It is possible for the spa to exist without a root vdev
* while it transitions during import/export.
*/
vdev_t *rvd = spa->spa_root_vdev;
if (rvd == NULL) {
return ("TRANSITIONING");
}
vdev_state_t state = rvd->vdev_state;
vdev_aux_t aux = rvd->vdev_stat.vs_aux;
if (spa_suspended(spa) &&
(spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
return ("SUSPENDED");
switch (state) {
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return ("OFFLINE");
case VDEV_STATE_REMOVED:
return ("REMOVED");
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return ("FAULTED");
else if (aux == VDEV_AUX_SPLIT_POOL)
return ("SPLIT");
else
return ("UNAVAIL");
case VDEV_STATE_FAULTED:
return ("FAULTED");
case VDEV_STATE_DEGRADED:
return ("DEGRADED");
case VDEV_STATE_HEALTHY:
return ("ONLINE");
default:
break;
}
return ("UNKNOWN");
}
boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
spa_has_checkpoint(spa_t *spa)
{
return (spa->spa_checkpoint_txg != 0);
}
boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
spa->spa_mode == SPA_MODE_READ);
}
uint64_t
spa_min_claim_txg(spa_t *spa)
{
uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
if (checkpoint_txg != 0)
return (checkpoint_txg + 1);
return (spa->spa_first_txg);
}
/*
* If there is a checkpoint, async destroys may consume more space from
* the pool instead of freeing it. In an attempt to save the pool from
* getting suspended when it is about to run out of space, we stop
* processing async destroys.
*/
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t unreserved = dsl_pool_unreserved_space(dp,
ZFS_SPACE_CHECK_EXTRA_RESERVED);
uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
if (spa_has_checkpoint(spa) && avail == 0)
return (B_TRUE);
return (B_FALSE);
}
#if defined(_KERNEL)
int
param_set_deadman_failmode_common(const char *val)
{
spa_t *spa = NULL;
char *p;
if (val == NULL)
return (SET_ERROR(EINVAL));
if ((p = strchr(val, '\n')) != NULL)
*p = '\0';
if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
strcmp(val, "panic"))
return (SET_ERROR(EINVAL));
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa_set_deadman_failmode(spa, val);
mutex_exit(&spa_namespace_lock);
}
return (0);
}
#endif
/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_remove);
EXPORT_SYMBOL(spa_next);
/* Refcount functions */
EXPORT_SYMBOL(spa_open_ref);
EXPORT_SYMBOL(spa_close);
EXPORT_SYMBOL(spa_refcount_zero);
/* Pool configuration lock */
EXPORT_SYMBOL(spa_config_tryenter);
EXPORT_SYMBOL(spa_config_enter);
EXPORT_SYMBOL(spa_config_exit);
EXPORT_SYMBOL(spa_config_held);
/* Pool vdev add/remove lock */
EXPORT_SYMBOL(spa_vdev_enter);
EXPORT_SYMBOL(spa_vdev_exit);
/* Pool vdev state change lock */
EXPORT_SYMBOL(spa_vdev_state_enter);
EXPORT_SYMBOL(spa_vdev_state_exit);
/* Accessor functions */
EXPORT_SYMBOL(spa_shutting_down);
EXPORT_SYMBOL(spa_get_dsl);
EXPORT_SYMBOL(spa_get_rootblkptr);
EXPORT_SYMBOL(spa_set_rootblkptr);
EXPORT_SYMBOL(spa_altroot);
EXPORT_SYMBOL(spa_sync_pass);
EXPORT_SYMBOL(spa_name);
EXPORT_SYMBOL(spa_guid);
EXPORT_SYMBOL(spa_last_synced_txg);
EXPORT_SYMBOL(spa_first_txg);
EXPORT_SYMBOL(spa_syncing_txg);
EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
EXPORT_SYMBOL(spa_special_class);
EXPORT_SYMBOL(spa_preferred_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);
EXPORT_SYMBOL(spa_maxblocksize);
EXPORT_SYMBOL(spa_maxdnodesize);
/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_guid_exists);
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
EXPORT_SYMBOL(spa_trust_config);
EXPORT_SYMBOL(spa_missing_tvds_allowed);
EXPORT_SYMBOL(spa_set_missing_tvds);
EXPORT_SYMBOL(spa_state_to_name);
EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
EXPORT_SYMBOL(spa_min_claim_txg);
EXPORT_SYMBOL(spa_suspend_async_destroy);
EXPORT_SYMBOL(spa_has_checkpoint);
EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
"Set additional debugging flags");
ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
"Set to attempt to recover from fatal errors");
ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
"Set to ignore IO errors during free and permanently leak the space");
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, ULONG, ZMOD_RW,
"Dead I/O check interval in milliseconds");
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
"Enable deadman timer");
ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, INT, ZMOD_RW,
"SPA size estimate multiplication factor");
ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
"Place DDT data into the special class");
ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
"Place user data indirect blocks into the special class");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
param_set_deadman_failmode, param_get_charp, ZMOD_RW,
"Failmode for deadman timer");
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
param_set_deadman_synctime, param_get_ulong, ZMOD_RW,
"Pool sync expiration time in milliseconds");
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
param_set_deadman_ziotime, param_get_ulong, ZMOD_RW,
"IO expiration time in milliseconds");
ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, INT, ZMOD_RW,
"Small file blocks in special vdevs depends on this much "
"free space available");
/* END CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
param_get_int, ZMOD_RW, "Reserved free space in pool");
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index 4e316d8135ee..47a475135302 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -1,5426 +1,5426 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
* Copyright [2021] Hewlett Packard Enterprise Development LP
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_raidz.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
/*
* One metaslab from each (normal-class) vdev is used by the ZIL. These are
* called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
* part of the spa_embedded_log_class. The metaslab with the most free space
* in each vdev is selected for this purpose when the pool is opened (or a
* vdev is added). See vdev_metaslab_init().
*
* Log blocks can be allocated from the following locations. Each one is tried
* in order until the allocation succeeds:
* 1. dedicated log vdevs, aka "slog" (spa_log_class)
* 2. embedded slog metaslabs (spa_embedded_log_class)
* 3. other metaslabs in normal vdevs (spa_normal_class)
*
* zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
* than this number of metaslabs in the vdev. This ensures that we don't set
* aside an unreasonable amount of space for the ZIL. If set to less than
* 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
* (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
*/
int zfs_embedded_slog_min_ms = 64;
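/*
 * Rough illustration: with the default of 64 above and the default target
 * of ~200 metaslabs per top-level vdev (zfs_vdev_default_ms_count below),
 * one metaslab (about 0.5% of the vdev) is set aside for the embedded slog;
 * vdevs with fewer than 64 metaslabs get none.
 */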
/* default target for number of metaslabs per top-level vdev */
int zfs_vdev_default_ms_count = 200;
/* minimum number of metaslabs per top-level vdev */
int zfs_vdev_min_ms_count = 16;
/* practical upper limit of total metaslabs per top-level vdev */
int zfs_vdev_ms_count_limit = 1ULL << 17;
/* lower limit for metaslab size (512M) */
int zfs_vdev_default_ms_shift = 29;
/* upper limit for metaslab size (16G) */
int zfs_vdev_max_ms_shift = 34;
int vdev_validate_skip = B_FALSE;
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
int zfs_vdev_dtl_sm_blksz = (1 << 12);
/*
* Rate limit slow IO (delay) events to this many per second.
*/
unsigned int zfs_slow_io_events_per_second = 20;
/*
* Rate limit checksum events after this many checksum errors per second.
*/
unsigned int zfs_checksum_events_per_second = 20;
/*
* Ignore errors during scrub/resilver. Allows working around a
* resilver triggered upon import when there are pool errors.
*/
int zfs_scan_ignore_errors = 0;
/*
* vdev-wide space maps that have lots of entries written to them at
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
int zfs_vdev_standard_sm_blksz = (1 << 17);
/*
* Tunable parameter for debugging or performance analysis. Setting this
* will cause pool corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
int zfs_nocacheflush = 0;
uint64_t zfs_vdev_max_auto_ashift = ASHIFT_MAX;
uint64_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
-/*PRINTFLIKE2*/
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
if (vd->vdev_path != NULL) {
zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
vd->vdev_path, buf);
} else {
zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id,
(u_longlong_t)vd->vdev_guid, buf);
}
}
void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
char state[20];
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
zfs_dbgmsg("%*svdev %llu: %s", indent, "",
(u_longlong_t)vd->vdev_id,
vd->vdev_ops->vdev_op_type);
return;
}
switch (vd->vdev_state) {
case VDEV_STATE_UNKNOWN:
(void) snprintf(state, sizeof (state), "unknown");
break;
case VDEV_STATE_CLOSED:
(void) snprintf(state, sizeof (state), "closed");
break;
case VDEV_STATE_OFFLINE:
(void) snprintf(state, sizeof (state), "offline");
break;
case VDEV_STATE_REMOVED:
(void) snprintf(state, sizeof (state), "removed");
break;
case VDEV_STATE_CANT_OPEN:
(void) snprintf(state, sizeof (state), "can't open");
break;
case VDEV_STATE_FAULTED:
(void) snprintf(state, sizeof (state), "faulted");
break;
case VDEV_STATE_DEGRADED:
(void) snprintf(state, sizeof (state), "degraded");
break;
case VDEV_STATE_HEALTHY:
(void) snprintf(state, sizeof (state), "healthy");
break;
default:
(void) snprintf(state, sizeof (state), "<state %u>",
(uint_t)vd->vdev_state);
}
zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
"", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
vd->vdev_islog ? " (log)" : "",
(u_longlong_t)vd->vdev_guid,
vd->vdev_path ? vd->vdev_path : "N/A", state);
for (uint64_t i = 0; i < vd->vdev_children; i++)
vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}
/*
* Virtual device management.
*/
static vdev_ops_t *vdev_ops_table[] = {
&vdev_root_ops,
&vdev_raidz_ops,
&vdev_draid_ops,
&vdev_draid_spare_ops,
&vdev_mirror_ops,
&vdev_replacing_ops,
&vdev_spare_ops,
&vdev_disk_ops,
&vdev_file_ops,
&vdev_missing_ops,
&vdev_hole_ops,
&vdev_indirect_ops,
NULL
};
/*
* Given a vdev type, return the appropriate ops vector.
*/
static vdev_ops_t *
vdev_getops(const char *type)
{
vdev_ops_t *ops, **opspp;
for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
if (strcmp(ops->vdev_op_type, type) == 0)
break;
return (ops);
}
/*
* Given a vdev and a metaslab class, find which metaslab group we're
* interested in. All vdevs may belong to two different metaslab classes.
* Dedicated slog devices use only the primary metaslab group, rather than a
* separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
*/
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
if (mc == spa_embedded_log_class(vd->vdev_spa) &&
vd->vdev_log_mg != NULL)
return (vd->vdev_log_mg);
else
return (vd->vdev_mg);
}
/* ARGSUSED */
void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
}
/*
* Derive the enumerated allocation bias from string input.
* String origin is either the per-vdev zap or zpool(8).
*/
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
alloc_bias = VDEV_BIAS_LOG;
else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
alloc_bias = VDEV_BIAS_SPECIAL;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
alloc_bias = VDEV_BIAS_DEDUP;
return (alloc_bias);
}
/*
* Default asize function: return the MAX of psize with the asize of
* all children. This is what's used by anything other than RAID-Z.
*/
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
uint64_t csize;
for (int c = 0; c < vd->vdev_children; c++) {
csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
asize = MAX(asize, csize);
}
return (asize);
}
uint64_t
vdev_default_min_asize(vdev_t *vd)
{
return (vd->vdev_min_asize);
}
/*
* Get the minimum allocatable size. We define the allocatable size as
* the vdev's asize rounded to the nearest metaslab. This allows us to
* replace or attach devices which don't have the same physical size but
* can still satisfy the same number of allocations.
*/
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
/*
* If our parent is NULL (inactive spare or cache) or is the root,
* just return our own asize.
*/
if (pvd == NULL)
return (vd->vdev_asize);
/*
* The top-level vdev just returns the allocatable size rounded
* to the nearest metaslab.
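* (For example, with 16 GiB metaslabs, i.e. a vdev_ms_shift of 34, the
* asize is rounded down to a 16 GiB multiple; illustrative only, since
* the shift is set per vdev.)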
*/
if (vd == vd->vdev_top)
return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
return (pvd->vdev_ops->vdev_op_min_asize(pvd));
}
void
vdev_set_min_asize(vdev_t *vd)
{
vd->vdev_min_asize = vdev_get_min_asize(vd);
for (int c = 0; c < vd->vdev_children; c++)
vdev_set_min_asize(vd->vdev_child[c]);
}
/*
* Get the minimal allocation size for the top-level vdev.
*/
uint64_t
vdev_get_min_alloc(vdev_t *vd)
{
uint64_t min_alloc = 1ULL << vd->vdev_ashift;
if (vd->vdev_ops->vdev_op_min_alloc != NULL)
min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);
return (min_alloc);
}
/*
* Get the parity level for a top-level vdev.
*/
uint64_t
vdev_get_nparity(vdev_t *vd)
{
uint64_t nparity = 0;
if (vd->vdev_ops->vdev_op_nparity != NULL)
nparity = vd->vdev_ops->vdev_op_nparity(vd);
return (nparity);
}
/*
* Get the number of data disks for a top-level vdev.
*/
uint64_t
vdev_get_ndisks(vdev_t *vd)
{
uint64_t ndisks = 1;
if (vd->vdev_ops->vdev_op_ndisks != NULL)
ndisks = vd->vdev_ops->vdev_op_ndisks(vd);
return (ndisks);
}
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (vdev < rvd->vdev_children) {
ASSERT(rvd->vdev_child[vdev] != NULL);
return (rvd->vdev_child[vdev]);
}
return (NULL);
}
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
vdev_t *mvd;
if (vd->vdev_guid == guid)
return (vd);
for (int c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
NULL)
return (mvd);
return (NULL);
}
static int
vdev_count_leaves_impl(vdev_t *vd)
{
int n = 0;
if (vd->vdev_ops->vdev_op_leaf)
return (1);
for (int c = 0; c < vd->vdev_children; c++)
n += vdev_count_leaves_impl(vd->vdev_child[c]);
return (n);
}
int
vdev_count_leaves(spa_t *spa)
{
int rc;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
rc = vdev_count_leaves_impl(spa->spa_root_vdev);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (rc);
}
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
size_t oldsize, newsize;
uint64_t id = cvd->vdev_id;
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(cvd->vdev_parent == NULL);
cvd->vdev_parent = pvd;
if (pvd == NULL)
return;
ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
oldsize = pvd->vdev_children * sizeof (vdev_t *);
pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
newsize = pvd->vdev_children * sizeof (vdev_t *);
newchild = kmem_alloc(newsize, KM_SLEEP);
if (pvd->vdev_child != NULL) {
bcopy(pvd->vdev_child, newchild, oldsize);
kmem_free(pvd->vdev_child, oldsize);
}
pvd->vdev_child = newchild;
pvd->vdev_child[id] = cvd;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += cvd->vdev_guid_sum;
if (cvd->vdev_ops->vdev_op_leaf) {
list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
cvd->vdev_spa->spa_leaf_list_gen++;
}
}
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
int c;
uint_t id = cvd->vdev_id;
ASSERT(cvd->vdev_parent == pvd);
if (pvd == NULL)
return;
ASSERT(id < pvd->vdev_children);
ASSERT(pvd->vdev_child[id] == cvd);
pvd->vdev_child[id] = NULL;
cvd->vdev_parent = NULL;
for (c = 0; c < pvd->vdev_children; c++)
if (pvd->vdev_child[c])
break;
if (c == pvd->vdev_children) {
kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
pvd->vdev_child = NULL;
pvd->vdev_children = 0;
}
if (cvd->vdev_ops->vdev_op_leaf) {
spa_t *spa = cvd->vdev_spa;
list_remove(&spa->spa_leaf_list, cvd);
spa->spa_leaf_list_gen++;
}
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}
/*
* Remove any holes in the child array.
*/
void
vdev_compact_children(vdev_t *pvd)
{
vdev_t **newchild, *cvd;
int oldc = pvd->vdev_children;
int newc;
ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (oldc == 0)
return;
for (int c = newc = 0; c < oldc; c++)
if (pvd->vdev_child[c])
newc++;
if (newc > 0) {
newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
for (int c = newc = 0; c < oldc; c++) {
if ((cvd = pvd->vdev_child[c]) != NULL) {
newchild[newc] = cvd;
cvd->vdev_id = newc++;
}
}
} else {
newchild = NULL;
}
kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
pvd->vdev_child = newchild;
pvd->vdev_children = newc;
}
/*
* Allocate and minimally initialize a vdev_t.
*/
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
vdev_t *vd;
vdev_indirect_config_t *vic;
vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
vic = &vd->vdev_indirect_config;
if (spa->spa_root_vdev == NULL) {
ASSERT(ops == &vdev_root_ops);
spa->spa_root_vdev = vd;
spa->spa_load_guid = spa_generate_guid(NULL);
}
if (guid == 0 && ops != &vdev_hole_ops) {
if (spa->spa_root_vdev == vd) {
/*
* The root vdev's guid will also be the pool guid,
* which must be unique among all pools.
*/
guid = spa_generate_guid(NULL);
} else {
/*
* Any other vdev's guid must be unique within the pool.
*/
guid = spa_generate_guid(spa);
}
ASSERT(!spa_guid_exists(spa_guid(spa), guid));
}
vd->vdev_spa = spa;
vd->vdev_id = id;
vd->vdev_guid = guid;
vd->vdev_guid_sum = guid;
vd->vdev_ops = ops;
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_ishole = (ops == &vdev_hole_ops);
vic->vic_prev_indirect_vdev = UINT64_MAX;
rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
/*
* Initialize rate limit structs for events. We rate limit ZIO delay
* and checksum events so that we don't overwhelm ZED with thousands
* of events when a disk is acting up.
*/
zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_checksum_rl,
&zfs_checksum_events_per_second, 1);
list_link_init(&vd->vdev_config_dirty_node);
list_link_init(&vd->vdev_state_dirty_node);
list_link_init(&vd->vdev_initialize_node);
list_link_init(&vd->vdev_leaf_node);
list_link_init(&vd->vdev_trim_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) {
vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
}
txg_list_create(&vd->vdev_ms_list, spa,
offsetof(struct metaslab, ms_txg_node));
txg_list_create(&vd->vdev_dtl_list, spa,
offsetof(struct vdev, vdev_dtl_node));
vd->vdev_stat.vs_timestamp = gethrtime();
vdev_queue_init(vd);
vdev_cache_init(vd);
return (vd);
}
/*
* Allocate a new vdev. The 'alloctype' is used to control whether we are
* creating a new vdev or loading an existing one - the behavior is slightly
* different for each case.
*/
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
int alloctype)
{
vdev_ops_t *ops;
char *type;
uint64_t guid = 0, islog;
vdev_t *vd;
vdev_indirect_config_t *vic;
char *tmp = NULL;
int rc;
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
boolean_t top_level = (parent && !parent->vdev_parent);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
* Otherwise, vdev_alloc_common() will generate one for us.
*/
if (alloctype == VDEV_ALLOC_LOAD) {
uint64_t label_id;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
*/
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
return (SET_ERROR(ENOTSUP));
if (top_level && alloctype == VDEV_ALLOC_ADD) {
char *bias;
/*
* If creating a top-level vdev, check for allocation
* classes input.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
&bias) == 0) {
alloc_bias = vdev_derive_alloc_bias(bias);
/* spa_vdev_add() expects feature to be enabled */
if (spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa,
SPA_FEATURE_ALLOCATION_CLASSES)) {
return (SET_ERROR(ENOTSUP));
}
}
/* spa_vdev_add() expects feature to be enabled */
if (ops == &vdev_draid_ops &&
spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
return (SET_ERROR(ENOTSUP));
}
}
/*
* Initialize the vdev specific data. This is done before calling
* vdev_alloc_common() since it may fail and this simplifies the
* error reporting and cleanup code paths.
*/
void *tsd = NULL;
if (ops->vdev_op_init != NULL) {
rc = ops->vdev_op_init(spa, nv, &tsd);
if (rc != 0) {
return (rc);
}
}
vd = vdev_alloc_common(spa, id, guid, ops);
vd->vdev_tsd = tsd;
vd->vdev_islog = islog;
if (top_level && alloc_bias != VDEV_BIAS_NONE)
vd->vdev_alloc_bias = alloc_bias;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
vd->vdev_path = spa_strdup(vd->vdev_path);
/*
* ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
* fault on a vdev and want it to persist across imports (like with
* zpool offline -f).
*/
rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_faulted = 1;
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
}
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
vd->vdev_devid = spa_strdup(vd->vdev_devid);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
&vd->vdev_physpath) == 0)
vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&vd->vdev_enc_sysfs_path) == 0)
vd->vdev_enc_sysfs_path = spa_strdup(vd->vdev_enc_sysfs_path);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
vd->vdev_fru = spa_strdup(vd->vdev_fru);
/*
* Set the whole_disk property. If it's not specified, leave the value
* as -1.
*/
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&vd->vdev_wholedisk) != 0)
vd->vdev_wholedisk = -1ULL;
vic = &vd->vdev_indirect_config;
ASSERT0(vic->vic_mapping_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
&vic->vic_mapping_object);
ASSERT0(vic->vic_births_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
&vic->vic_births_object);
ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
&vic->vic_prev_indirect_vdev);
/*
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&vd->vdev_not_present);
/*
* Get the alignment requirement.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
/*
* Retrieve the vdev creation time.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
&vd->vdev_crtxg);
/*
* If we're a top-level vdev, try to load the allocation parameters.
*/
if (top_level &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
&vd->vdev_ms_array);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
&vd->vdev_ms_shift);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
&vd->vdev_asize);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
&vd->vdev_removing);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
&vd->vdev_top_zap);
} else {
ASSERT0(vd->vdev_top_zap);
}
if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
ASSERT(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL);
/* Note: metaslab_group_create() is now deferred */
}
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
} else {
ASSERT0(vd->vdev_leaf_zap);
}
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
if (alloctype == VDEV_ALLOC_LOAD) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
&vd->vdev_dtl_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
&vd->vdev_unspare);
}
if (alloctype == VDEV_ALLOC_ROOTPOOL) {
uint64_t spare = 0;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
&spare) == 0 && spare)
spa_spare_add(vd);
}
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
&vd->vdev_offline);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
&vd->vdev_resilver_txg);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
&vd->vdev_rebuild_txg);
if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
vdev_defer_resilver(vd);
/*
* In general, when importing a pool we want to ignore the
* persistent fault state, as the diagnosis made on another
* system may not be valid in the current context. The only
* exception is if we forced a vdev to a persistently faulted
* state with 'zpool offline -f'. The persistent fault will
* remain across imports until cleared.
*
* Local vdevs will remain in the faulted state.
*/
if (spa_load_state(spa) == SPA_LOAD_OPEN ||
spa_load_state(spa) == SPA_LOAD_IMPORT) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
&vd->vdev_faulted);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
&vd->vdev_degraded);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
&vd->vdev_removed);
if (vd->vdev_faulted || vd->vdev_degraded) {
char *aux;
vd->vdev_label_aux =
VDEV_AUX_ERR_EXCEEDED;
if (nvlist_lookup_string(nv,
ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
strcmp(aux, "external") == 0)
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
else
vd->vdev_faulted = 0ULL;
}
}
}
/*
* Add ourselves to the parent's list of children.
*/
vdev_add_child(parent, vd);
*vdp = vd;
return (0);
}
void
vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
/*
* Scan queues are normally destroyed at the end of a scan. If the
* queue exists here, that implies the vdev is being removed while
* the scan is still running.
*/
if (vd->vdev_scan_io_queue != NULL) {
mutex_enter(&vd->vdev_scan_io_queue_lock);
dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
vd->vdev_scan_io_queue = NULL;
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* vdev_free() implies closing the vdev first. This is simpler than
* trying to ensure complicated semantics for all callers.
*/
vdev_close(vd);
ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
/*
* Free all children.
*/
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
ASSERT(vd->vdev_child == NULL);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
if (vd->vdev_ops->vdev_op_fini != NULL)
vd->vdev_ops->vdev_op_fini(vd);
/*
* Discard allocation state.
*/
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
}
if (vd->vdev_log_mg != NULL) {
ASSERT0(vd->vdev_ms_count);
metaslab_group_destroy(vd->vdev_log_mg);
vd->vdev_log_mg = NULL;
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Remove this vdev from its parent's child list.
*/
vdev_remove_child(vd->vdev_parent, vd);
ASSERT(vd->vdev_parent == NULL);
ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
* Clean up vdev structure.
*/
vdev_queue_fini(vd);
vdev_cache_fini(vd);
if (vd->vdev_path)
spa_strfree(vd->vdev_path);
if (vd->vdev_devid)
spa_strfree(vd->vdev_devid);
if (vd->vdev_physpath)
spa_strfree(vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path)
spa_strfree(vd->vdev_enc_sysfs_path);
if (vd->vdev_fru)
spa_strfree(vd->vdev_fru);
if (vd->vdev_isspare)
spa_spare_remove(vd);
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
txg_list_destroy(&vd->vdev_ms_list);
txg_list_destroy(&vd->vdev_dtl_list);
mutex_enter(&vd->vdev_dtl_lock);
space_map_close(vd->vdev_dtl_sm);
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
range_tree_destroy(vd->vdev_dtl[t]);
}
mutex_exit(&vd->vdev_dtl_lock);
EQUIV(vd->vdev_indirect_births != NULL,
vd->vdev_indirect_mapping != NULL);
if (vd->vdev_indirect_births != NULL) {
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vdev_indirect_births_close(vd->vdev_indirect_births);
}
if (vd->vdev_obsolete_sm != NULL) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
}
range_tree_destroy(vd->vdev_obsolete_segments);
rw_destroy(&vd->vdev_indirect_rwlock);
mutex_destroy(&vd->vdev_obsolete_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
mutex_destroy(&vd->vdev_probe_lock);
mutex_destroy(&vd->vdev_scan_io_queue_lock);
mutex_destroy(&vd->vdev_initialize_lock);
mutex_destroy(&vd->vdev_initialize_io_lock);
cv_destroy(&vd->vdev_initialize_io_cv);
cv_destroy(&vd->vdev_initialize_cv);
mutex_destroy(&vd->vdev_trim_lock);
mutex_destroy(&vd->vdev_autotrim_lock);
mutex_destroy(&vd->vdev_trim_io_lock);
cv_destroy(&vd->vdev_trim_cv);
cv_destroy(&vd->vdev_autotrim_cv);
cv_destroy(&vd->vdev_trim_io_cv);
mutex_destroy(&vd->vdev_rebuild_lock);
cv_destroy(&vd->vdev_rebuild_cv);
zfs_ratelimit_fini(&vd->vdev_delay_rl);
zfs_ratelimit_fini(&vd->vdev_deadman_rl);
zfs_ratelimit_fini(&vd->vdev_checksum_rl);
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
kmem_free(vd, sizeof (vdev_t));
}
/*
* Transfer top-level vdev state from svd to tvd.
*/
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
spa_t *spa = svd->vdev_spa;
metaslab_t *msp;
vdev_t *vd;
int t;
ASSERT(tvd == tvd->vdev_top);
tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
tvd->vdev_ms_array = svd->vdev_ms_array;
tvd->vdev_ms_shift = svd->vdev_ms_shift;
tvd->vdev_ms_count = svd->vdev_ms_count;
tvd->vdev_top_zap = svd->vdev_top_zap;
svd->vdev_ms_array = 0;
svd->vdev_ms_shift = 0;
svd->vdev_ms_count = 0;
svd->vdev_top_zap = 0;
if (tvd->vdev_mg)
ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
if (tvd->vdev_log_mg)
ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
tvd->vdev_mg = svd->vdev_mg;
tvd->vdev_log_mg = svd->vdev_log_mg;
tvd->vdev_ms = svd->vdev_ms;
svd->vdev_mg = NULL;
svd->vdev_log_mg = NULL;
svd->vdev_ms = NULL;
if (tvd->vdev_mg != NULL)
tvd->vdev_mg->mg_vd = tvd;
if (tvd->vdev_log_mg != NULL)
tvd->vdev_log_mg->mg_vd = tvd;
tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
svd->vdev_checkpoint_sm = NULL;
tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
svd->vdev_alloc_bias = VDEV_BIAS_NONE;
tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
svd->vdev_stat.vs_alloc = 0;
svd->vdev_stat.vs_space = 0;
svd->vdev_stat.vs_dspace = 0;
/*
* State which may be set on a top-level vdev that's in the
* process of being removed.
*/
ASSERT0(tvd->vdev_indirect_config.vic_births_object);
ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
ASSERT0(tvd->vdev_removing);
ASSERT0(tvd->vdev_rebuilding);
tvd->vdev_removing = svd->vdev_removing;
tvd->vdev_rebuilding = svd->vdev_rebuilding;
tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
tvd->vdev_indirect_config = svd->vdev_indirect_config;
tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
tvd->vdev_indirect_births = svd->vdev_indirect_births;
range_tree_swap(&svd->vdev_obsolete_segments,
&tvd->vdev_obsolete_segments);
tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
svd->vdev_indirect_config.vic_mapping_object = 0;
svd->vdev_indirect_config.vic_births_object = 0;
svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
svd->vdev_indirect_mapping = NULL;
svd->vdev_indirect_births = NULL;
svd->vdev_obsolete_sm = NULL;
svd->vdev_removing = 0;
svd->vdev_rebuilding = 0;
for (t = 0; t < TXG_SIZE; t++) {
while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
}
if (list_link_active(&svd->vdev_config_dirty_node)) {
vdev_config_clean(svd);
vdev_config_dirty(tvd);
}
if (list_link_active(&svd->vdev_state_dirty_node)) {
vdev_state_clean(svd);
vdev_state_dirty(tvd);
}
tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
svd->vdev_deflate_ratio = 0;
tvd->vdev_islog = svd->vdev_islog;
svd->vdev_islog = 0;
dsl_scan_io_queue_vdev_xfer(svd, tvd);
}
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
if (vd == NULL)
return;
vd->vdev_top = tvd;
for (int c = 0; c < vd->vdev_children; c++)
vdev_top_update(tvd, vd->vdev_child[c]);
}
/*
* Add a mirror/replacing vdev above an existing vdev. There is no need to
* call .vdev_op_init() since mirror/replacing vdevs do not have private state.
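* For example, 'zpool attach' uses this to place a mirror or replacing vdev
* above the existing leaf so the new child can be resilvered from it.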
*/
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
spa_t *spa = cvd->vdev_spa;
vdev_t *pvd = cvd->vdev_parent;
vdev_t *mvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
mvd->vdev_asize = cvd->vdev_asize;
mvd->vdev_min_asize = cvd->vdev_min_asize;
mvd->vdev_max_asize = cvd->vdev_max_asize;
mvd->vdev_psize = cvd->vdev_psize;
mvd->vdev_ashift = cvd->vdev_ashift;
mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
mvd->vdev_state = cvd->vdev_state;
mvd->vdev_crtxg = cvd->vdev_crtxg;
vdev_remove_child(pvd, cvd);
vdev_add_child(pvd, mvd);
cvd->vdev_id = mvd->vdev_children;
vdev_add_child(mvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (mvd == mvd->vdev_top)
vdev_top_transfer(cvd, mvd);
return (mvd);
}
/*
* Remove a 1-way mirror/replacing vdev from the tree.
*/
void
vdev_remove_parent(vdev_t *cvd)
{
vdev_t *mvd = cvd->vdev_parent;
vdev_t *pvd = mvd->vdev_parent;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(mvd->vdev_children == 1);
ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
mvd->vdev_ops == &vdev_replacing_ops ||
mvd->vdev_ops == &vdev_spare_ops);
cvd->vdev_ashift = mvd->vdev_ashift;
cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
vdev_remove_child(mvd, cvd);
vdev_remove_child(pvd, mvd);
/*
* If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
* Otherwise, we could have detached an offline device, and when we
* go to import the pool we'll think we have two top-level vdevs,
* instead of a different version of the same top-level vdev.
*/
if (mvd->vdev_top == mvd) {
uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
cvd->vdev_orig_guid = cvd->vdev_guid;
cvd->vdev_guid += guid_delta;
cvd->vdev_guid_sum += guid_delta;
/*
* If pool not set for autoexpand, we need to also preserve
* mvd's asize to prevent automatic expansion of cvd.
* Otherwise if we are adjusting the mirror by attaching and
* detaching children of non-uniform sizes, the mirror could
* autoexpand, unexpectedly requiring larger devices to
* re-establish the mirror.
*/
if (!cvd->vdev_spa->spa_autoexpand)
cvd->vdev_asize = mvd->vdev_asize;
}
cvd->vdev_id = mvd->vdev_id;
vdev_add_child(pvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
ASSERT(mvd->vdev_children == 0);
vdev_free(mvd);
}
void
vdev_metaslab_group_create(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
/*
* metaslab_group_create() was delayed until the allocation bias was available.
*/
if (vd->vdev_mg == NULL) {
metaslab_class_t *mc;
if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
vd->vdev_alloc_bias = VDEV_BIAS_LOG;
ASSERT3U(vd->vdev_islog, ==,
(vd->vdev_alloc_bias == VDEV_BIAS_LOG));
switch (vd->vdev_alloc_bias) {
case VDEV_BIAS_LOG:
mc = spa_log_class(spa);
break;
case VDEV_BIAS_SPECIAL:
mc = spa_special_class(spa);
break;
case VDEV_BIAS_DEDUP:
mc = spa_dedup_class(spa);
break;
default:
mc = spa_normal_class(spa);
}
vd->vdev_mg = metaslab_group_create(mc, vd,
spa->spa_alloc_count);
if (!vd->vdev_islog) {
vd->vdev_log_mg = metaslab_group_create(
spa_embedded_log_class(spa), vd, 1);
}
/*
* The spa ashift min/max only apply for the normal metaslab
* class. Class destination is late binding so ashift boundary
* setting had to wait until now.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
if (vd->vdev_ashift > spa->spa_max_ashift)
spa->spa_max_ashift = vd->vdev_ashift;
if (vd->vdev_ashift < spa->spa_min_ashift)
spa->spa_min_ashift = vd->vdev_ashift;
uint64_t min_alloc = vdev_get_min_alloc(vd);
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
}
}
}
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
uint64_t oldc = vd->vdev_ms_count;
uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
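/* e.g. a 1 TiB asize with 16 GiB metaslabs (ms_shift 34) yields newc == 64 */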
metaslab_t **mspp;
int error;
boolean_t expanding = (oldc != 0);
ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
/*
* This vdev is not being allocated from yet or is a hole.
*/
if (vd->vdev_ms_shift == 0)
return (0);
ASSERT(!vd->vdev_ishole);
ASSERT(oldc <= newc);
mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
if (expanding) {
bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
vd->vdev_ms = mspp;
vd->vdev_ms_count = newc;
for (uint64_t m = oldc; m < newc; m++) {
uint64_t object = 0;
/*
* vdev_ms_array may be 0 if we are creating the "fake"
* metaslabs for an indirect vdev for zdb's leak detection.
* See zdb_leak_init().
*/
if (txg == 0 && vd->vdev_ms_array != 0) {
error = dmu_read(spa->spa_meta_objset,
vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
if (error != 0) {
vdev_dbgmsg(vd, "unable to read the metaslab "
"array [error=%d]", error);
return (error);
}
}
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
if (error != 0) {
vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
error);
return (error);
}
}
/*
* Find the emptiest metaslab on the vdev and mark it for use for
* embedded slog by moving it from the regular to the log metaslab
* group.
*/
if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
uint64_t slog_msid = 0;
uint64_t smallest = UINT64_MAX;
/*
* Note, we only search the new metaslabs, because the old
* (pre-existing) ones may be active (e.g. have non-empty
* range_tree's), and we don't move them to the new
* metaslab_t.
*/
for (uint64_t m = oldc; m < newc; m++) {
uint64_t alloc =
space_map_allocated(vd->vdev_ms[m]->ms_sm);
if (alloc < smallest) {
slog_msid = m;
smallest = alloc;
}
}
metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
/*
* The metaslab was marked as dirty at the end of
* metaslab_init(). Remove it from the dirty list so that we
* can uninitialize and reinitialize it to the new class.
*/
if (txg != 0) {
(void) txg_list_remove_this(&vd->vdev_ms_list,
slog_ms, txg);
}
uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
metaslab_fini(slog_ms);
VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
&vd->vdev_ms[slog_msid]));
}
if (txg == 0)
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
/*
* If the vdev is being removed we don't activate
* the metaslabs since we want to ensure that no new
* allocations are performed on this device.
*/
if (!expanding && !vd->vdev_removing) {
metaslab_group_activate(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_group_activate(vd->vdev_log_mg);
}
if (txg == 0)
spa_config_exit(spa, SCL_ALLOC, FTAG);
/*
* Regardless of whether this vdev was just added or it is being
* expanded, the metaslab count has changed. Recalculate the
* block limit.
*/
spa_log_sm_set_blocklimit(spa);
return (0);
}
void
vdev_metaslab_fini(vdev_t *vd)
{
if (vd->vdev_checkpoint_sm != NULL) {
ASSERT(spa_feature_is_active(vd->vdev_spa,
SPA_FEATURE_POOL_CHECKPOINT));
space_map_close(vd->vdev_checkpoint_sm);
/*
* Even though we close the space map, we need to set its
* pointer to NULL. The reason is that vdev_metaslab_fini()
* may be called multiple times for certain operations
* (i.e. when destroying a pool) so we need to ensure that
* this clause never executes twice. This logic is similar
* to the one used for the vdev_ms clause below.
*/
vd->vdev_checkpoint_sm = NULL;
}
if (vd->vdev_ms != NULL) {
metaslab_group_t *mg = vd->vdev_mg;
metaslab_group_passivate(mg);
if (vd->vdev_log_mg != NULL) {
ASSERT(!vd->vdev_islog);
metaslab_group_passivate(vd->vdev_log_mg);
}
uint64_t count = vd->vdev_ms_count;
for (uint64_t m = 0; m < count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp != NULL)
metaslab_fini(msp);
}
vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
vd->vdev_ms = NULL;
vd->vdev_ms_count = 0;
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT0(mg->mg_histogram[i]);
if (vd->vdev_log_mg != NULL)
ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
}
}
ASSERT0(vd->vdev_ms_count);
ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}
typedef struct vdev_probe_stats {
boolean_t vps_readable;
boolean_t vps_writeable;
int vps_flags;
} vdev_probe_stats_t;
static void
vdev_probe_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
vdev_t *vd = zio->io_vd;
vdev_probe_stats_t *vps = zio->io_private;
ASSERT(vd->vdev_probe_zio != NULL);
if (zio->io_type == ZIO_TYPE_READ) {
if (zio->io_error == 0)
vps->vps_readable = 1;
if (zio->io_error == 0 && spa_writeable(spa)) {
zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
zio->io_offset, zio->io_size, zio->io_abd,
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
} else {
abd_free(zio->io_abd);
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
if (zio->io_error == 0)
vps->vps_writeable = 1;
abd_free(zio->io_abd);
} else if (zio->io_type == ZIO_TYPE_NULL) {
zio_t *pio;
zio_link_t *zl;
vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
if (vdev_readable(vd) &&
(vdev_writeable(vd) || !spa_writeable(spa))) {
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
vdev_dbgmsg(vd, "failed probe");
(void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, NULL, 0);
zio->io_error = SET_ERROR(ENXIO);
}
mutex_enter(&vd->vdev_probe_lock);
ASSERT(vd->vdev_probe_zio == zio);
vd->vdev_probe_zio = NULL;
mutex_exit(&vd->vdev_probe_lock);
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
}
/*
* Determine whether this device is accessible.
*
* Read and write to several known locations: the pad regions of each
* vdev label but the first, which we leave alone in case it contains
* a VTOC.
*/
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
spa_t *spa = vd->vdev_spa;
vdev_probe_stats_t *vps = NULL;
zio_t *pio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* Don't probe the probe.
*/
if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
return (NULL);
/*
* To prevent 'probe storms' when a device fails, we create
* just one probe i/o at a time. All zios that want to probe
* this vdev will become parents of the probe io.
*/
mutex_enter(&vd->vdev_probe_lock);
if ((pio = vd->vdev_probe_zio) == NULL) {
vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
ZIO_FLAG_TRYHARD;
if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
/*
* vdev_cant_read and vdev_cant_write can only
* transition from TRUE to FALSE when we have the
* SCL_ZIO lock as writer; otherwise they can only
* transition from FALSE to TRUE. This ensures that
* any zio looking at these values can assume that
* failures persist for the life of the I/O. That's
* important because when a device has intermittent
* connectivity problems, we want to ensure that
* they're ascribed to the device (ENXIO) and not
* the zio (EIO).
*
* Since we hold SCL_ZIO as writer here, clear both
* values so the probe can reevaluate from first
* principles.
*/
vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
}
vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
vdev_probe_done, vps,
vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
/*
* We can't change the vdev state in this context, so we
* kick off an async task to do it on our behalf.
*/
if (zio != NULL) {
vd->vdev_probe_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_PROBE);
}
}
if (zio != NULL)
zio_add_child(zio, pio);
mutex_exit(&vd->vdev_probe_lock);
if (vps == NULL) {
ASSERT(zio != NULL);
return (NULL);
}
for (int l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(pio, vd,
vdev_label_offset(vd->vdev_psize, l,
offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
}
if (zio == NULL)
return (pio);
zio_nowait(pio);
return (NULL);
}
static void
vdev_load_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_load_error = vdev_load(vd);
}
static void
vdev_open_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_open_thread = curthread;
vd->vdev_open_error = vdev_open(vd);
vd->vdev_open_thread = NULL;
}
static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
#ifdef _KERNEL
if (zvol_is_zvol(vd->vdev_path))
return (B_TRUE);
#endif
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_uses_zvols(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Returns B_TRUE if the passed child should be opened.
*/
static boolean_t
vdev_default_open_children_func(vdev_t *vd)
{
return (B_TRUE);
}
/*
* Open the requested child vdevs. If any of the leaf vdevs are using
* a ZFS volume then do the opens in a single thread. This avoids a
* deadlock when the current thread is holding the spa_namespace_lock.
*/
static void
vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
{
int children = vd->vdev_children;
taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
vd->vdev_nonrot = B_TRUE;
for (int c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (open_func(cvd) == B_FALSE)
continue;
if (tq == NULL || vdev_uses_zvols(vd)) {
cvd->vdev_open_error = vdev_open(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_open_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
vd->vdev_nonrot &= cvd->vdev_nonrot;
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
}
/*
* Open all child vdevs.
*/
void
vdev_open_children(vdev_t *vd)
{
vdev_open_children_impl(vd, vdev_default_open_children_func);
}
/*
* Conditionally open a subset of child vdevs.
*/
void
vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
{
vdev_open_children_impl(vd, open_func);
}
/*
* Compute the raidz-deflation ratio. Note, we hard-code
* in 128k (1 << 17) because it is the "typical" blocksize.
* Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
* otherwise it would inconsistently account for existing bp's.
*/
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
vd->vdev_deflate_ratio = (1 << 17) /
(vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
}
}
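/*
 * Worked example (an illustrative sketch, assuming a plain single-disk or
 * mirror top-level vdev where vdev_psize_to_asize() is the identity for an
 * aligned 128k block):
 *
 *   vdev_psize_to_asize(vd, 1 << 17) == 1 << 17
 *   vdev_deflate_ratio = (1 << 17) / ((1 << 17) >> SPA_MINBLOCKSHIFT)
 *                      = 131072 / 256 = 512
 *
 * A raidz vdev returns a larger asize for the same 128k psize, so its
 * deflate ratio comes out smaller, reflecting the parity overhead.
 */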
/*
* Maximize performance by inflating the configured ashift for top level
* vdevs to be as close to the physical ashift as possible while maintaining
* administrator defined limits and ensuring it doesn't go below the
* logical ashift.
*/
static void
vdev_ashift_optimize(vdev_t *vd)
{
ASSERT(vd == vd->vdev_top);
if (vd->vdev_ashift < vd->vdev_physical_ashift) {
vd->vdev_ashift = MIN(
MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
MAX(zfs_vdev_min_auto_ashift,
vd->vdev_physical_ashift));
} else {
/*
* If the logical and physical ashifts are the same, then
* we ensure that the top-level vdev's ashift is not smaller
* than our minimum ashift value. For the unusual case
* where logical ashift > physical ashift, we can't cap
* the calculated ashift based on max ashift as that
* would cause failures.
* We still check if we need to increase it to match
* the min ashift.
*/
vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
vd->vdev_ashift);
}
}
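/*
 * Illustrative sketch of the first branch above, assuming module settings of
 * zfs_vdev_min_auto_ashift = 9 and zfs_vdev_max_auto_ashift = 14:
 *
 *   logical ashift 9 (512B), physical ashift 12 (4KiB), vdev_ashift 9
 *   vdev_ashift = MIN(MAX(14, 9), MAX(9, 12)) = MIN(14, 12) = 12
 *
 * so the top-level vdev is aligned to the 4KiB physical sector size while
 * staying within the administrator-defined auto-ashift limits.
 */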
/*
* Prepare a virtual device for access.
*/
int
vdev_open(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
int error;
uint64_t osize = 0;
uint64_t max_osize = 0;
uint64_t asize, max_asize, psize;
uint64_t logical_ashift = 0;
uint64_t physical_ashift = 0;
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
vd->vdev_state == VDEV_STATE_CANT_OPEN ||
vd->vdev_state == VDEV_STATE_OFFLINE);
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_min_asize = vdev_get_min_asize(vd);
/*
* If this vdev is not removed, check its fault status. If it's
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
&logical_ashift, &physical_ashift);
/*
* Physical volume size should never be larger than its max size, unless
* the disk has shrunk while we were reading it or the device is buggy
* or damaged: either way it's not safe for use, bail out of the open.
*/
if (osize > max_osize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_OPEN_FAILED);
return (SET_ERROR(ENXIO));
}
/*
* Reset the vdev_reopening flag so that we actually close
* the vdev on error.
*/
vd->vdev_reopening = B_FALSE;
if (zio_injection_enabled && error == 0)
error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
if (error) {
if (vd->vdev_removed &&
vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
vd->vdev_removed = B_FALSE;
if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
vd->vdev_stat.vs_aux);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
vd->vdev_stat.vs_aux);
}
return (error);
}
vd->vdev_removed = B_FALSE;
/*
* Recheck the faulted flag now that we have confirmed that
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
}
/*
* For hole or missing vdevs we just return success.
*/
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
return (0);
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_NONE);
break;
}
}
osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
if (vd->vdev_children == 0) {
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
max_asize = max_osize - (VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE);
} else {
if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
max_asize = max_osize;
}
/*
* If the vdev was expanded, record this so that we can re-create the
* uberblock rings in labels {2,3}, during the next sync.
*/
if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
vd->vdev_copy_uberblocks = B_TRUE;
vd->vdev_psize = psize;
/*
* Make sure the allocatable size hasn't shrunk too much.
*/
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EINVAL));
}
/*
* We can always set the logical/physical ashift members since
* their values are only used to calculate the vdev_ashift when
* the device is first added to the config. These values should
* not be used for anything else since they may change whenever
* the device is reopened and we don't store them in the label.
*/
vd->vdev_physical_ashift =
MAX(physical_ashift, vd->vdev_physical_ashift);
vd->vdev_logical_ashift = MAX(logical_ashift,
vd->vdev_logical_ashift);
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
* For compatibility, a different ashift can be requested.
*/
vd->vdev_asize = asize;
vd->vdev_max_asize = max_asize;
/*
* If the vdev_ashift was not overridden at creation time,
* then set it to the logical ashift and optimize the ashift.
*/
if (vd->vdev_ashift == 0) {
vd->vdev_ashift = vd->vdev_logical_ashift;
if (vd->vdev_logical_ashift > ASHIFT_MAX) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_ASHIFT_TOO_BIG);
return (SET_ERROR(EDOM));
}
if (vd->vdev_top == vd) {
vdev_ashift_optimize(vd);
}
}
if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
vd->vdev_ashift > ASHIFT_MAX)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_ASHIFT);
return (SET_ERROR(EDOM));
}
} else {
/*
* Make sure the alignment required hasn't increased.
*/
if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
vd->vdev_ops->vdev_op_leaf) {
(void) zfs_ereport_post(
FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
spa, vd, NULL, NULL, 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EDOM));
}
vd->vdev_max_asize = max_asize;
}
/*
* If all children are healthy we update asize if either:
* The asize has increased, due to a device expansion caused by dynamic
* LUN growth or vdev replacement, and automatic expansion is enabled;
* making the additional space available.
*
* The asize has decreased, due to a device shrink usually caused by a
* vdev replace with a smaller device. This ensures that calculations
* based on max_asize and asize, e.g. esize, are always valid. It's safe
* to do this as we've already validated that asize is greater than
* vdev_min_asize.
*/
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
((asize > vd->vdev_asize &&
(vd->vdev_expanding || spa->spa_autoexpand)) ||
(asize < vd->vdev_asize)))
vd->vdev_asize = asize;
vdev_set_min_asize(vd);
/*
* Ensure we can issue some IO before declaring the
* vdev open for business.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(error = zio_wait(vdev_probe(vd, NULL))) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
VDEV_AUX_ERR_EXCEEDED);
return (error);
}
/*
* Track the minimum allocation size.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
uint64_t min_alloc = vdev_get_min_alloc(vd);
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
}
/*
* If this is a leaf vdev, assess whether a resilver is needed.
* But don't do this if we are doing a reopen for a scrub, since
* this would just restart the scrub we are already doing.
*/
if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
return (0);
}
static void
vdev_validate_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_validate_thread = curthread;
vd->vdev_validate_error = vdev_validate(vd);
vd->vdev_validate_thread = NULL;
}
/*
* Called once the vdevs are all opened, this routine validates the label
* contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
vdev_validate(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
taskq_t *tq = NULL;
nvlist_t *label;
uint64_t guid = 0, aux_guid = 0, top_guid;
uint64_t state;
nvlist_t *nvl;
uint64_t txg;
int children = vd->vdev_children;
if (vdev_validate_skip)
return (0);
if (children > 0) {
tq = taskq_create("vdev_validate", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
for (uint64_t c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
vdev_validate_child(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < children; c++) {
int error = vd->vdev_child[c]->vdev_validate_error;
if (error != 0)
return (SET_ERROR(EBADF));
}
/*
* If the device has already failed, or was marked offline, don't do
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
return (0);
/*
* If we are performing an extreme rewind, we allow for a label that
* was modified at a point after the current txg.
* If config lock is not held do not check for the txg. spa_sync could
* be updating the vdev's label before updating spa_last_synced_txg.
*/
if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
txg = UINT64_MAX;
else
txg = spa_last_synced_txg(spa);
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
"txg %llu", (u_longlong_t)txg);
return (0);
}
/*
* Determine if this vdev has been split off into another
* pool. If so, then refuse to open it.
*/
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
&aux_guid) == 0 && aux_guid == spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_SPLIT_POOL);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_GUID);
return (0);
}
/*
* If config is not trusted then ignore the spa guid check. This is
* necessary because if the machine crashed during a re-guid the new
* guid might have been written to all of the vdev labels, but not the
* cached config. The check will be performed again once we have the
* trusted config from the MOS.
*/
if (spa->spa_trust_config && guid != spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
"match config (%llu != %llu)", (u_longlong_t)guid,
(u_longlong_t)spa_guid(spa));
return (0);
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
!= 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
&aux_guid) != 0)
aux_guid = 0;
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_GUID);
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
!= 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_TOP_GUID);
return (0);
}
/*
* If this vdev just became a top-level vdev because its sibling was
* detached, it will have adopted the parent's vdev guid -- but the
* label may or may not be on disk yet. Fortunately, either version
* of the label will have the same top guid, so if we're a top-level
* vdev, we can safely compare to that instead.
* However, if the config comes from a cachefile that failed to update
* after the detach, a top-level vdev will appear as a non top-level
* vdev in the config. Also relax the constraints if we perform an
* extreme rewind.
*
* If we split this vdev off instead, then we also check the
* original pool's guid. We don't want to consider the vdev
* corrupt if it is partway through a split operation.
*/
if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
boolean_t mismatch = B_FALSE;
if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
mismatch = B_TRUE;
} else {
if (vd->vdev_guid != top_guid &&
vd->vdev_top->vdev_guid != guid)
mismatch = B_TRUE;
}
if (mismatch) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: config guid "
"doesn't match label guid");
vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
(u_longlong_t)vd->vdev_guid,
(u_longlong_t)vd->vdev_top->vdev_guid);
vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
"aux_guid %llu", (u_longlong_t)guid,
(u_longlong_t)top_guid, (u_longlong_t)aux_guid);
return (0);
}
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_STATE);
return (0);
}
nvlist_free(label);
/*
* If this is a verbatim import, no need to check the
* state of the pool.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE) {
vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
"for spa %s", (u_longlong_t)state, spa->spa_name);
return (SET_ERROR(EBADF));
}
/*
* If we were able to open and validate a vdev that was
* previously marked permanently unavailable, clear that state
* now.
*/
if (vd->vdev_not_present)
vd->vdev_not_present = 0;
return (0);
}
static void
vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
{
if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
"from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
dvd->vdev_path, svd->vdev_path);
spa_strfree(dvd->vdev_path);
dvd->vdev_path = spa_strdup(svd->vdev_path);
}
} else if (svd->vdev_path != NULL) {
dvd->vdev_path = spa_strdup(svd->vdev_path);
zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
(u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
}
}
/*
* Recursively copy vdev paths from one vdev to another. Source and destination
* vdev trees must have the same geometry, otherwise an error is returned.
* Intended to copy
* paths from userland config into MOS config.
*/
int
vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
{
if ((svd->vdev_ops == &vdev_missing_ops) ||
(svd->vdev_ishole && dvd->vdev_ishole) ||
(dvd->vdev_ops == &vdev_indirect_ops))
return (0);
if (svd->vdev_ops != dvd->vdev_ops) {
vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_guid != dvd->vdev_guid) {
vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
"%llu)", (u_longlong_t)svd->vdev_guid,
(u_longlong_t)dvd->vdev_guid);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_children != dvd->vdev_children) {
vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
"%llu != %llu", (u_longlong_t)svd->vdev_children,
(u_longlong_t)dvd->vdev_children);
return (SET_ERROR(EINVAL));
}
for (uint64_t i = 0; i < svd->vdev_children; i++) {
int error = vdev_copy_path_strict(svd->vdev_child[i],
dvd->vdev_child[i]);
if (error != 0)
return (error);
}
if (svd->vdev_ops->vdev_op_leaf)
vdev_copy_path_impl(svd, dvd);
return (0);
}
static void
vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
{
ASSERT(stvd->vdev_top == stvd);
ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
for (uint64_t i = 0; i < dvd->vdev_children; i++) {
vdev_copy_path_search(stvd, dvd->vdev_child[i]);
}
if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
return;
/*
* The idea here is that while a vdev can shift positions within
* a top vdev (when replacing, attaching mirror, etc.) it cannot
* step outside of it.
*/
vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
return;
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_copy_path_impl(vd, dvd);
}
/*
* Recursively copy vdev paths from one root vdev to another. Source and
* destination vdev trees may differ in geometry. For each destination leaf
* vdev, search for a vdev with the same guid and top vdev id in the source.
* Intended to copy paths from userland config into MOS config.
*/
void
vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
{
uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
ASSERT(srvd->vdev_ops == &vdev_root_ops);
ASSERT(drvd->vdev_ops == &vdev_root_ops);
for (uint64_t i = 0; i < children; i++) {
vdev_copy_path_search(srvd->vdev_child[i],
drvd->vdev_child[i]);
}
}
/*
* Close a virtual device.
*/
void
vdev_close(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
spa_t *spa __maybe_unused = vd->vdev_spa;
ASSERT(vd != NULL);
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/*
* If our parent is reopening, then we are as well, unless we are
* going offline.
*/
if (pvd != NULL && pvd->vdev_reopening)
vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
vd->vdev_ops->vdev_op_close(vd);
vdev_cache_purge(vd);
/*
* We record the previous state before we close it, so that if we are
* doing a reopen(), we don't generate FMA ereports if we notice that
* it's still faulted.
*/
vd->vdev_prevstate = vd->vdev_state;
if (vd->vdev_offline)
vd->vdev_state = VDEV_STATE_OFFLINE;
else
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}
void
vdev_hold(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_is_root(spa));
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
return;
for (int c = 0; c < vd->vdev_children; c++)
vdev_hold(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
vd->vdev_ops->vdev_op_hold(vd);
}
void
vdev_rele(vdev_t *vd)
{
ASSERT(spa_is_root(vd->vdev_spa));
for (int c = 0; c < vd->vdev_children; c++)
vdev_rele(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
vd->vdev_ops->vdev_op_rele(vd);
}
/*
* Reopen all interior vdevs and any unopened leaves. We don't actually
* reopen leaf vdevs which had previously been opened as they might deadlock
* on the spa_config_lock. Instead we only obtain the leaf's physical size.
* If the leaf has never been opened then open it, as usual.
*/
void
vdev_reopen(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/* set the reopening flag unless we're taking the vdev offline */
vd->vdev_reopening = !vd->vdev_offline;
vdev_close(vd);
(void) vdev_open(vd);
/*
* Call vdev_validate() here to make sure we have the same device.
* Otherwise, a device with an invalid label could be successfully
* opened in response to vdev_reopen().
*/
if (vd->vdev_aux) {
(void) vdev_validate_aux(vd);
if (vdev_readable(vd) && vdev_writeable(vd) &&
vd->vdev_aux == &spa->spa_l2cache) {
/*
* In case the vdev is present we should evict all ARC
* buffers and pointers to log blocks and reclaim their
* space before restoring its contents to L2ARC.
*/
if (l2arc_vdev_present(vd)) {
l2arc_rebuild_vdev(vd, B_TRUE);
} else {
l2arc_add_vdev(spa, vd);
}
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
} else {
(void) vdev_validate(vd);
}
/*
* Reassess parent vdev's health.
*/
vdev_propagate_state(vd);
}
int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
int error;
/*
* Normally, partial opens (e.g. of a mirror) are allowed.
* For a create, however, we want to fail the request if
* there are any components we can't open.
*/
error = vdev_open(vd);
if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
vdev_close(vd);
return (error ? error : SET_ERROR(ENXIO));
}
/*
* Recursively load DTLs and initialize all labels.
*/
if ((error = vdev_dtl_load(vd)) != 0 ||
(error = vdev_label_init(vd, txg, isreplacing ?
VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
vdev_close(vd);
return (error);
}
return (0);
}
void
vdev_metaslab_set_size(vdev_t *vd)
{
uint64_t asize = vd->vdev_asize;
uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
uint64_t ms_shift;
/*
* There are two dimensions to the metaslab sizing calculation:
* the size of the metaslab and the count of metaslabs per vdev.
*
* The default values used below are a good balance between memory
* usage (larger metaslab size means more memory needed for loaded
* metaslabs; more metaslabs means more memory needed for the
* metaslab_t structs), metaslab load time (larger metaslabs take
* longer to load), and metaslab sync time (more metaslabs means
* more time spent syncing all of them).
*
* In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
* The range of the dimensions are as follows:
*
* 2^29 <= ms_size <= 2^34
* 16 <= ms_count <= 131,072
*
* On the lower end of vdev sizes, we aim for metaslab sizes of
* at least 512MB (2^29) to minimize fragmentation effects when
* testing with smaller devices. However, the count constraint
* of at least 16 metaslabs will override this minimum size goal.
*
* On the upper end of vdev sizes, we aim for a maximum metaslab
* size of 16GB. However, we will cap the total count to 2^17
* metaslabs to keep our memory footprint in check and let the
* metaslab size grow from there if that limit is hit.
*
* The net effect of applying the above constraints is summarized below.
*
* vdev size metaslab count
* --------------|-----------------
* < 8GB ~16
* 8GB - 100GB one per 512MB
* 100GB - 3TB ~200
* 3TB - 2PB one per 16GB
* > 2PB ~131,072
* --------------------------------
*
* Finally, note that all of the above calculate the initial
* number of metaslabs. Expanding a top-level vdev will result
* in additional metaslabs being allocated making it possible
* to exceed the zfs_vdev_ms_count_limit.
*/
if (ms_count < zfs_vdev_min_ms_count)
ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
else if (ms_count > zfs_vdev_default_ms_count)
ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
else
ms_shift = zfs_vdev_default_ms_shift;
if (ms_shift < SPA_MAXBLOCKSHIFT) {
ms_shift = SPA_MAXBLOCKSHIFT;
} else if (ms_shift > zfs_vdev_max_ms_shift) {
ms_shift = zfs_vdev_max_ms_shift;
/* cap the total count to constrain memory footprint */
if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
}
vd->vdev_ms_shift = ms_shift;
ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
}
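/*
 * Illustrative sketch with the default tunables described above
 * (zfs_vdev_min_ms_count = 16, zfs_vdev_default_ms_count = 200,
 * zfs_vdev_default_ms_shift = 29) for a hypothetical 50GB vdev:
 *
 *   ms_count = (50 << 30) >> 29 = 100
 *
 * 100 lies between the minimum count (16) and the default count (200), so
 * ms_shift stays at the default of 29 and the vdev gets one hundred 512MB
 * metaslabs, matching the "8GB - 100GB" row of the table above.
 */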
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
ASSERT(vd == vd->vdev_top);
/* indirect vdevs don't have metaslabs or dtls */
ASSERT(vdev_is_concrete(vd) || flags == 0);
ASSERT(ISP2(flags));
ASSERT(spa_writeable(vd->vdev_spa));
if (flags & VDD_METASLAB)
(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
if (flags & VDD_DTL)
(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}
void
vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
if (vd->vdev_ops->vdev_op_leaf)
vdev_dirty(vd->vdev_top, flags, vd, txg);
}
/*
* DTLs.
*
* A vdev's DTL (dirty time log) is the set of transaction groups for which
* the vdev has less than perfect replication. There are four kinds of DTL:
*
* DTL_MISSING: txgs for which the vdev has no valid copies of the data
*
* DTL_PARTIAL: txgs for which data is available, but not fully replicated
*
* DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
* scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
* txgs that was scrubbed.
*
* DTL_OUTAGE: txgs which cannot currently be read, whether due to
* persistent errors or just some device being offline.
* Unlike the other three, the DTL_OUTAGE map is not generally
* maintained; it's only computed when needed, typically to
* determine whether a device can be detached.
*
* For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
* either has the data or it doesn't.
*
* For interior vdevs such as mirror and RAID-Z the picture is more complex.
* A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
* if any child is less than fully replicated, then so is its parent.
* A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
* comprising only those txgs which appear in more than 'maxfaults' children;
* those are the txgs we don't have enough replication to read. For example,
* double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
* thus, its DTL_MISSING consists of the set of txgs that appear in more than
* two child DTL_MISSING maps.
*
* It should be clear from the above that to compute the DTLs and outage maps
* for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
* Therefore, that is all we keep on disk. When loading the pool, or after
* a configuration change, we generate all other DTLs from first principles.
*/
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa));
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_contains(rt, txg, size))
range_tree_add(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
}
boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t dirty = B_FALSE;
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
/*
* While we are loading the pool, the DTLs have not been loaded yet.
* This isn't a problem but it can result in devices being tried
* which are known to not have the data, in which case the import
* relies on the checksum to ensure that we get the right data.
* Note that while importing we are only reading the MOS, which is
* always checksummed.
*/
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(rt))
dirty = range_tree_contains(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
return (dirty);
}
boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty;
mutex_enter(&vd->vdev_dtl_lock);
empty = range_tree_is_empty(rt);
mutex_exit(&vd->vdev_dtl_lock);
return (empty);
}
/*
* Check if the txg falls within the range which must be
* resilvered. DVAs outside this range can always be skipped.
*/
boolean_t
vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
/* Set by sequential resilver. */
if (phys_birth == TXG_UNKNOWN)
return (B_TRUE);
return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
}
/*
* Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
*/
boolean_t
vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
vd->vdev_ops->vdev_op_leaf)
return (B_TRUE);
return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
phys_birth));
}
/*
* Returns the lowest txg in the DTL range.
*/
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
}
/*
* Returns the highest txg in the DTL.
*/
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
}
/*
* Determine if a resilvering vdev should remove any DTL entries from
* its range. If the vdev was resilvering for the entire duration of the
* scan then it should excise that range from its DTLs. Otherwise, this
* vdev is considered partially resilvered and should leave its DTL
* entries intact. The comment in vdev_dtl_reassess() describes how we
* excise the DTLs.
*/
static boolean_t
vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
{
ASSERT0(vd->vdev_children);
if (vd->vdev_state < VDEV_STATE_DEGRADED)
return (B_FALSE);
if (vd->vdev_resilver_deferred)
return (B_FALSE);
if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
return (B_TRUE);
if (rebuild_done) {
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
/* Rebuild not initiated by attach */
if (vd->vdev_rebuild_txg == 0)
return (B_TRUE);
/*
* When a rebuild completes without error then all missing data
* up to the rebuild max txg has been reconstructed and the DTL
* is eligible for excision.
*/
if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
return (B_TRUE);
}
} else {
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
/* Resilver not initiated by attach */
if (vd->vdev_resilver_txg == 0)
return (B_TRUE);
/*
* When a resilver is initiated the scan will assign the
* scn_max_txg value to the highest txg value that exists
* in all DTLs. If this device's max DTL is not part of this
* scan (i.e. it is not in the range (scn_min_txg, scn_max_txg])
* then it is not eligible for excision.
*/
if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Reassess DTLs after a config change or scrub completion. If txg == 0 no
* write operations will be issued to the pool.
*/
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done)
{
spa_t *spa = vd->vdev_spa;
avl_tree_t reftree;
int minref;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
for (int c = 0; c < vd->vdev_children; c++)
vdev_dtl_reassess(vd->vdev_child[c], txg,
scrub_txg, scrub_done, rebuild_done);
if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
return;
if (vd->vdev_ops->vdev_op_leaf) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
boolean_t check_excise = B_FALSE;
boolean_t wasempty = B_TRUE;
mutex_enter(&vd->vdev_dtl_lock);
/*
* If requested, pretend the scan or rebuild completed cleanly.
*/
if (zfs_scan_ignore_errors) {
if (scn != NULL)
scn->scn_phys.scn_errors = 0;
if (vr != NULL)
vr->vr_rebuild_phys.vrp_errors = 0;
}
if (scrub_txg != 0 &&
!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
wasempty = B_FALSE;
zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
"dtl:%llu/%llu errors:%llu",
(u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
(u_longlong_t)scrub_txg, spa->spa_scrub_started,
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd),
(u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
}
/*
* If we've completed a scrub/resilver or a rebuild cleanly
* then determine if this vdev should remove any DTLs. We
* only want to excise regions on vdevs that were available
* during the entire duration of this scan.
*/
if (rebuild_done &&
vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
check_excise = B_TRUE;
} else {
if (spa->spa_scrub_started ||
(scn != NULL && scn->scn_phys.scn_errors == 0)) {
check_excise = B_TRUE;
}
}
if (scrub_txg && check_excise &&
vdev_dtl_should_excise(vd, rebuild_done)) {
/*
* We completed a scrub, resilver or rebuild up to
* scrub_txg. If we did it without rebooting, then
* the scrub dtl will be valid, so excise the old
* region and fold in the scrub dtl. Otherwise,
* leave the dtl as-is if there was an error.
*
* There's a little trick here: to excise the beginning
* of the DTL_MISSING map, we put it into a reference
* tree and then add a segment with refcnt -1 that
* covers the range [0, scrub_txg). This means
* that each txg in that range has refcnt -1 or 0.
* We then add DTL_SCRUB with a refcnt of 2, so that
* entries in the range [0, scrub_txg) will have a
* positive refcnt -- either 1 or 2. We then convert
* the reference tree into the new DTL_MISSING map.
*/
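/*
 * A hypothetical trace of that trick, with DTL_MISSING = [50, 200),
 * DTL_SCRUB = [120, 130) and scrub_txg = 150:
 *
 *   refcnt  [50, 120) = 1 - 1     = 0   excised (repaired by the scrub)
 *   refcnt [120, 130) = 1 - 1 + 2 = 2   kept (scrub could not repair it)
 *   refcnt [130, 150) = 1 - 1     = 0   excised
 *   refcnt [150, 200) = 1         = 1   kept (beyond the scrubbed range)
 *
 * With minref = 1, the regenerated DTL_MISSING is [120, 130) U [150, 200).
 */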
space_reftree_create(&reftree);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_SCRUB], 2);
space_reftree_generate_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_destroy(&reftree);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd));
} else if (!wasempty) {
zfs_dbgmsg("DTL_MISSING is now empty");
}
}
range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
if (scrub_done)
range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
if (!vdev_readable(vd))
range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
else
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
/*
* If the vdev was resilvering or rebuilding and no longer
* has any DTLs then reset the appropriate flag and dirty
* the top level so that we persist the change.
*/
if (txg != 0 &&
range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
if (vd->vdev_rebuild_txg != 0) {
vd->vdev_rebuild_txg = 0;
vdev_config_dirty(vd->vdev_top);
} else if (vd->vdev_resilver_txg != 0) {
vd->vdev_resilver_txg = 0;
vdev_config_dirty(vd->vdev_top);
}
}
mutex_exit(&vd->vdev_dtl_lock);
if (txg != 0)
vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
return;
}
mutex_enter(&vd->vdev_dtl_lock);
for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB)
continue; /* leaf vdevs only */
if (t == DTL_PARTIAL)
minref = 1; /* i.e. non-zero */
else if (vdev_get_nparity(vd) != 0)
minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */
else
minref = vd->vdev_children; /* any kind of mirror */
space_reftree_create(&reftree);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
space_reftree_destroy(&reftree);
}
mutex_exit(&vd->vdev_dtl_lock);
}
int
vdev_dtl_load(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rt;
int error = 0;
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
ASSERT(vdev_is_concrete(vd));
error = space_map_open(&vd->vdev_dtl_sm, mos,
vd->vdev_dtl_object, 0, -1ULL, 0);
if (error)
return (error);
ASSERT(vd->vdev_dtl_sm != NULL);
rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
if (error == 0) {
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add,
vd->vdev_dtl[DTL_MISSING]);
mutex_exit(&vd->vdev_dtl_lock);
}
range_tree_vacate(rt, NULL, NULL);
range_tree_destroy(rt);
return (error);
}
for (int c = 0; c < vd->vdev_children; c++) {
error = vdev_dtl_load(vd->vdev_child[c]);
if (error != 0)
break;
}
return (error);
}
static void
vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *string;
ASSERT(alloc_bias != VDEV_BIAS_NONE);
string =
(alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
(alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
(alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
ASSERT(string != NULL);
VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
1, strlen(string) + 1, string, tx));
if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
spa_activate_allocation_classes(spa, tx);
}
}
void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zapobj, tx));
}
uint64_t
vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
DMU_OT_NONE, 0, tx);
ASSERT(zap != 0);
VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zap, tx));
return (zap);
}
void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ops != &vdev_hole_ops &&
vd->vdev_ops != &vdev_missing_ops &&
vd->vdev_ops != &vdev_root_ops &&
!vd->vdev_top->vdev_removing) {
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
}
if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
vdev_zap_allocation_data(vd, tx);
}
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_construct_zaps(vd->vdev_child[i], tx);
}
}
static void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rtsync;
dmu_tx_t *tx;
uint64_t object = space_map_object(vd->vdev_dtl_sm);
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
mutex_enter(&vd->vdev_dtl_lock);
space_map_free(vd->vdev_dtl_sm, tx);
space_map_close(vd->vdev_dtl_sm);
vd->vdev_dtl_sm = NULL;
mutex_exit(&vd->vdev_dtl_lock);
/*
* We only destroy the leaf ZAP for detached leaves or for
* removed log devices. Removed data devices handle leaf ZAP
* cleanup later, once cancellation is no longer possible.
*/
if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
vd->vdev_top->vdev_islog)) {
vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
vd->vdev_leaf_zap = 0;
}
dmu_tx_commit(tx);
return;
}
if (vd->vdev_dtl_sm == NULL) {
uint64_t new_object;
new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
0, -1ULL, 0));
ASSERT(vd->vdev_dtl_sm != NULL);
}
rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(rtsync, NULL, NULL);
range_tree_destroy(rtsync);
/*
* If the object for the space map has changed then dirty
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
"new object %llu", (u_longlong_t)txg, spa_name(spa),
(u_longlong_t)object,
(u_longlong_t)space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
dmu_tx_commit(tx);
}
/*
* Determine whether the specified vdev can be offlined/detached/removed
* without losing data.
*/
boolean_t
vdev_dtl_required(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint8_t cant_read = vd->vdev_cant_read;
boolean_t required;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == spa->spa_root_vdev || vd == tvd)
return (B_TRUE);
/*
* Temporarily mark the device as unreadable, and then determine
* whether this results in any DTL outages in the top-level vdev.
* If not, we can safely offline/detach/remove the device.
*/
vd->vdev_cant_read = B_TRUE;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
vd->vdev_cant_read = cant_read;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
if (!required && zio_injection_enabled) {
required = !!zio_handle_device_injection(vd, NULL,
SET_ERROR(ECHILD));
}
return (required);
}
/*
* Determine if resilver is needed, and if so the txg range.
*/
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
boolean_t needed = B_FALSE;
uint64_t thismin = UINT64_MAX;
uint64_t thismax = 0;
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
vdev_writeable(vd)) {
thismin = vdev_dtl_min(vd);
thismax = vdev_dtl_max(vd);
needed = B_TRUE;
}
mutex_exit(&vd->vdev_dtl_lock);
} else {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
uint64_t cmin, cmax;
if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
thismin = MIN(thismin, cmin);
thismax = MAX(thismax, cmax);
needed = B_TRUE;
}
}
}
if (needed && minp) {
*minp = thismin;
*maxp = thismax;
}
return (needed);
}
/*
* Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj
* will contain either the checkpoint spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
int
vdev_load(vdev_t *vd)
{
int children = vd->vdev_children;
int error = 0;
taskq_t *tq = NULL;
/*
* It's only worthwhile to use the taskq for the root vdev, because the
* slow part is metaslab_init, and that only happens for top-level
* vdevs.
*/
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
tq = taskq_create("vdev_load", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
/*
* Recursively load all children.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
cvd->vdev_load_error = vdev_load(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_load_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < vd->vdev_children; c++) {
int error = vd->vdev_child[c]->vdev_load_error;
if (error != 0)
return (error);
}
vdev_set_deflate_ratio(vd);
/*
* On spa_load path, grab the allocation bias from our zap
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
char bias_str[64];
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
bias_str);
if (error == 0) {
ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
} else if (error != ENOENT) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
- "failed [error=%d]", vd->vdev_top_zap, error);
+ "failed [error=%d]",
+ (u_longlong_t)vd->vdev_top_zap, error);
return (error);
}
}
/*
* Load any rebuild state from the top-level vdev zap.
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
error = vdev_rebuild_load(vd);
if (error && error != ENOTSUP) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
"failed [error=%d]", error);
return (error);
}
}
/*
* If this is a top-level vdev, initialize its metaslabs.
*/
if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
"asize=%llu", (u_longlong_t)vd->vdev_ashift,
(u_longlong_t)vd->vdev_asize);
return (SET_ERROR(ENXIO));
}
error = vdev_metaslab_init(vd, 0);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
"[error=%d]", error);
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (error);
}
uint64_t checkpoint_sm_obj;
error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
error = space_map_open(&vd->vdev_checkpoint_sm,
mos, checkpoint_sm_obj, 0, vd->vdev_asize,
vd->vdev_ashift);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: space_map_open "
"failed for checkpoint spacemap (obj %llu) "
"[error=%d]",
(u_longlong_t)checkpoint_sm_obj, error);
return (error);
}
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since the checkpoint_sm contains free entries
* exclusively we can use space_map_allocated() to
* indicate the cumulative checkpointed space that
* has been freed.
*/
vd->vdev_stat.vs_checkpoint_space =
-space_map_allocated(vd->vdev_checkpoint_sm);
vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
vd->vdev_stat.vs_checkpoint_space;
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
"checkpoint space map object from vdev ZAP "
"[error=%d]", error);
return (error);
}
}
/*
* If this is a leaf vdev, load its DTL.
*/
if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
"[error=%d]", error);
return (error);
}
uint64_t obsolete_sm_object;
error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
"obsolete spacemap (obj %llu) [error=%d]",
(u_longlong_t)obsolete_sm_object, error);
return (error);
}
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
"space map object from vdev ZAP [error=%d]", error);
return (error);
}
return (0);
}
/*
* The special vdev case is used for hot spares and l2cache devices. Its
* sole purpose is to set the vdev state for the associated vdev. To do this,
* we make sure that we can open the underlying device, then try to read the
* label, and make sure that the label is sane and that it hasn't been
* repurposed to another pool.
*/
int
vdev_validate_aux(vdev_t *vd)
{
nvlist_t *label;
uint64_t guid, version;
uint64_t state;
if (!vdev_readable(vd))
return (0);
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (-1);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
!SPA_VERSION_IS_SUPPORTED(version) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
guid != vd->vdev_guid ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (-1);
}
/*
* We don't actually check the pool state here. If it's in fact in
* use by another pool, we update this fact on the fly when requested.
*/
nvlist_free(label);
return (0);
}
static void
vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
{
objset_t *mos = spa_meta_objset(vd->vdev_spa);
if (vd->vdev_top_zap == 0)
return;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
if (err == ENOENT)
return;
VERIFY0(err);
VERIFY0(dmu_object_free(mos, object, tx));
VERIFY0(zap_remove(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
}
/*
* Free the objects used to store this vdev's spacemaps, and the array
* that points to them.
*/
void
vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ms_array == 0)
return;
objset_t *mos = vd->vdev_spa->spa_meta_objset;
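/*
 * The metaslab array object holds one space map object number per
 * metaslab; read the entire array so each space map object can be
 * freed individually below.
 */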
uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
size_t array_bytes = array_count * sizeof (uint64_t);
uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
array_bytes, smobj_array, 0));
for (uint64_t i = 0; i < array_count; i++) {
uint64_t smobj = smobj_array[i];
if (smobj == 0)
continue;
space_map_free_obj(mos, smobj, tx);
}
kmem_free(smobj_array, array_bytes);
VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
vdev_destroy_ms_flush_data(vd, tx);
vd->vdev_ms_array = 0;
}
static void
vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
vdev_destroy_spacemaps(vd, tx);
if (vd->vdev_top_zap != 0) {
vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
vd->vdev_top_zap = 0;
}
dmu_tx_commit(tx);
}
void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
metaslab_t *msp;
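/*
 * Note whether any metaslabs were cleaned in this txg before the
 * list is drained; if so, the metaslab groups are reassessed after
 * their sync-done processing completes.
 */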
boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
ASSERT(vdev_is_concrete(vd));
while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
!= NULL)
metaslab_sync_done(msp, txg);
if (reassess) {
metaslab_sync_reassess(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_sync_reassess(vd->vdev_log_mg);
}
}
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
vdev_t *lvd;
metaslab_t *msp;
ASSERT3U(txg, ==, spa->spa_syncing_txg);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
vdev_indirect_sync_obsolete(vd, tx);
/*
* If the vdev is indirect, it can't have dirty
* metaslabs or DTLs.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
dmu_tx_commit(tx);
return;
}
}
ASSERT(vdev_is_concrete(vd));
if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
!vd->vdev_removing) {
ASSERT(vd == vd->vdev_top);
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
ASSERT(vd->vdev_ms_array != 0);
vdev_config_dirty(vd);
}
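/*
 * Sync each dirty metaslab for this txg and queue it on the clean
 * list so vdev_sync_done() can complete its bookkeeping.
 */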
while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
metaslab_sync(msp, txg);
(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
}
while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
vdev_dtl_sync(lvd, txg);
/*
* If this is an empty log device being removed, destroy the
* metadata associated with it.
*/
if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
vdev_remove_empty_log(vd, txg);
(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
dmu_tx_commit(tx);
}
uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
return (vd->vdev_ops->vdev_op_asize(vd, psize));
}
/*
* Mark the given vdev faulted. A faulted vdev behaves as if the device could
* not be opened, and no I/O is attempted.
*/
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd, *tvd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
tvd = vd->vdev_top;
/*
* If the user did a 'zpool offline -f' then make the fault persist across
* reboots.
*/
if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
/*
* There are two kinds of forced faults: temporary and
* persistent. Temporary faults go away at pool import, while
* persistent faults stay set. Both types of faults can be
* cleared with a zpool clear.
*
* We tell if a vdev is persistently faulted by looking at the
* ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
* import then it's a persistent fault. Otherwise, it's
* temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
* by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
* tells vdev_config_generate() (which gets run later) to set
* ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
*/
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_tmpoffline = B_FALSE;
aux = VDEV_AUX_EXTERNAL;
} else {
vd->vdev_tmpoffline = B_TRUE;
}
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
* were faulted.
*/
vd->vdev_label_aux = aux;
/*
* Faulted state takes precedence over degraded.
*/
vd->vdev_delayed_close = B_FALSE;
vd->vdev_faulted = 1ULL;
vd->vdev_degraded = 0ULL;
vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
/*
* If this device has the only valid copy of the data, then
* back off and simply mark the vdev as degraded instead.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
vd->vdev_degraded = 1ULL;
vd->vdev_faulted = 0ULL;
/*
* If we reopen the device and it's not dead, only then do we
* mark it degraded.
*/
vdev_reopen(tvd);
if (vdev_readable(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Mark the given vdev degraded. A degraded vdev is purely an indication to the
* user that something is wrong. The vdev continues to operate as normal as far
* as I/O is concerned.
*/
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
/*
* If the vdev is already faulted, then don't do anything.
*/
if (vd->vdev_faulted || vd->vdev_degraded)
return (spa_vdev_state_exit(spa, NULL, 0));
vd->vdev_degraded = 1ULL;
if (!vdev_is_dead(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
aux);
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Online the given vdev.
*
* If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
* spare device should be detached when the device finishes resilvering.
* Second, the online should be treated like a 'test' online case, so no FMA
* events are generated if the device fails to open.
*/
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
boolean_t wasoffline;
vdev_state_t oldstate;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
oldstate = vd->vdev_state;
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
/* XXX - L2ARC 1.0 does not support expansion */
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
spa->spa_autoexpand);
vd->vdev_expansion_time = gethrestime_sec();
}
vdev_reopen(tvd);
vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = B_FALSE;
}
if (newstate)
*newstate = vd->vdev_state;
if ((flags & ZFS_ONLINE_UNSPARE) &&
!vdev_is_dead(vd) && vd->vdev_parent &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
/* XXX - L2ARC 1.0 does not support expansion */
if (vd->vdev_aux)
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
/* Restart initializing if necessary */
mutex_enter(&vd->vdev_initialize_lock);
if (vdev_writeable(vd) &&
vd->vdev_initialize_thread == NULL &&
vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) vdev_initialize(vd);
}
mutex_exit(&vd->vdev_initialize_lock);
/*
* Restart trimming if necessary. We do not restart trimming for cache
* devices here; for them, trimming is triggered asynchronously by
* l2arc_rebuild_vdev() for the whole device, or by l2arc_evict() as it
* evicts space for upcoming writes.
*/
mutex_enter(&vd->vdev_trim_lock);
if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
vd->vdev_trim_thread == NULL &&
vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
(void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
mutex_exit(&vd->vdev_trim_lock);
if (wasoffline ||
(oldstate < VDEV_STATE_DEGRADED &&
vd->vdev_state >= VDEV_STATE_DEGRADED))
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
return (spa_vdev_state_exit(spa, vd, 0));
}
static int
vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
{
vdev_t *vd, *tvd;
int error = 0;
uint64_t generation;
metaslab_group_t *mg;
top:
spa_vdev_state_enter(spa, SCL_ALLOC);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
tvd = vd->vdev_top;
mg = tvd->vdev_mg;
generation = spa->spa_config_generation + 1;
/*
* If the device isn't already offline, try to offline it.
*/
if (!vd->vdev_offline) {
/*
* If this device has the only valid copy of some data,
* don't allow it to be offlined. Log devices are always
* expendable.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_dtl_required(vd))
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
/*
* If the top-level is a slog and it has had allocations
* then proceed. We check that the vdev's metaslab group
* is not NULL since it's possible that we may have just
* added this vdev but not yet initialized its metaslabs.
*/
if (tvd->vdev_islog && mg != NULL) {
/*
* Prevent any future allocations.
*/
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
error = spa_reset_logs(spa);
/*
* If the log device was successfully reset but has
* checkpointed data, do not offline it.
*/
if (error == 0 &&
tvd->vdev_checkpoint_sm != NULL) {
ASSERT3U(space_map_allocated(
tvd->vdev_checkpoint_sm), !=, 0);
error = ZFS_ERR_CHECKPOINT_EXISTS;
}
spa_vdev_state_enter(spa, SCL_ALLOC);
/*
* Check to see if the config has changed.
*/
if (error || generation != spa->spa_config_generation) {
metaslab_group_activate(mg);
if (error)
return (spa_vdev_state_exit(spa,
vd, error));
(void) spa_vdev_state_exit(spa, vd, 0);
goto top;
}
ASSERT0(tvd->vdev_stat.vs_alloc);
}
/*
* Offline this device and reopen its top-level vdev.
* If the top-level vdev is a log device then just offline
* it. Otherwise, if this action results in the top-level
* vdev becoming unusable, undo it and fail the request.
*/
vd->vdev_offline = B_TRUE;
vdev_reopen(tvd);
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_is_dead(tvd)) {
vd->vdev_offline = B_FALSE;
vdev_reopen(tvd);
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
}
/*
* Add the device back into the metaslab rotor so that
* once we online the device it's open for business.
*/
if (tvd->vdev_islog && mg != NULL)
metaslab_group_activate(mg);
}
vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
int error;
mutex_enter(&spa->spa_vdev_top_lock);
error = vdev_offline_locked(spa, guid, flags);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Clear the error counts associated with this vdev. Unlike vdev_online() and
* vdev_offline(), we assume the spa config is locked. We also clear all
* children. If 'vd' is NULL, then the user wants to clear all vdevs.
*/
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == NULL)
vd = rvd;
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vd->vdev_stat.vs_slow_ios = 0;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
/*
* It makes no sense to "clear" an indirect vdev.
*/
if (!vdev_is_concrete(vd))
return;
/*
* If we're in the FAULTED state or have experienced failed I/O, then
* clear the persistent state and attempt to reopen the device. We
* also mark the vdev config dirty, so that the new faulted state is
* written out to disk.
*/
if (vd->vdev_faulted || vd->vdev_degraded ||
!vdev_readable(vd) || !vdev_writeable(vd)) {
/*
* When reopening in response to a clear event, it may be due to
* an fmadm repair request. In this case, if the device is
* still broken, we still want to post the ereport again.
*/
vd->vdev_forcefault = B_TRUE;
vd->vdev_faulted = vd->vdev_degraded = 0ULL;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_stat.vs_aux = 0;
vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
vd->vdev_forcefault = B_FALSE;
if (vd != rvd && vdev_writeable(vd->vdev_top))
vdev_state_dirty(vd->vdev_top);
/* If a resilver isn't required, check if vdevs can be culled */
if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
/*
* When clearing a FMA-diagnosed fault, we always want to
* unspare the device, as we assume that the original spare was
* done in response to the FMA fault.
*/
if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
/* Clear recent error events cache (i.e. duplicate events tracking) */
zfs_ereport_clear(spa, vd);
}
boolean_t
vdev_is_dead(vdev_t *vd)
{
/*
* Holes and missing devices are always considered "dead".
* This simplifies the code since we don't have to check for
* these types of devices in the various code paths.
* Instead we rely on the fact that we skip over dead devices
* before issuing I/O to them.
*/
return (vd->vdev_state < VDEV_STATE_DEGRADED ||
vd->vdev_ops == &vdev_hole_ops ||
vd->vdev_ops == &vdev_missing_ops);
}
boolean_t
vdev_readable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}
boolean_t
vdev_writeable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
vdev_is_concrete(vd));
}
boolean_t
vdev_allocatable(vdev_t *vd)
{
uint64_t state = vd->vdev_state;
/*
* We currently allow allocations from vdevs which may be in the
* process of reopening (i.e. VDEV_STATE_CLOSED). If the device
* fails to reopen then we'll catch it later when we're holding
* the proper locks. Note that we have to get the vdev state
* in a local variable because although it changes atomically,
* we're asking two separate questions about it.
*/
return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
!vd->vdev_cant_write && vdev_is_concrete(vd) &&
vd->vdev_mg->mg_initialized);
}
boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
ASSERT(zio->io_vd == vd);
if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
return (B_FALSE);
if (zio->io_type == ZIO_TYPE_READ)
return (!vd->vdev_cant_read);
if (zio->io_type == ZIO_TYPE_WRITE)
return (!vd->vdev_cant_write);
return (B_TRUE);
}
static void
vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
{
/*
* Exclude the dRAID spare when aggregating to avoid double counting
* the ops and bytes. These IOs are counted by the physical leaves.
*/
if (cvd->vdev_ops == &vdev_draid_spare_ops)
return;
for (int t = 0; t < VS_ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
cvs->vs_scan_removing = cvd->vdev_removing;
}
/*
* Get extended stats
*/
static void
vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
int t, b;
for (t = 0; t < ZIO_TYPES; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
vsx->vsx_total_histo[t][b] +=
cvsx->vsx_total_histo[t][b];
}
}
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
vsx->vsx_queue_histo[t][b] +=
cvsx->vsx_queue_histo[t][b];
}
vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
}
}
boolean_t
vdev_is_spacemap_addressable(vdev_t *vd)
{
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
return (B_TRUE);
/*
* If double-word space map entries are not enabled we assume
* 47 bits of the space map entry are dedicated to the entry's
* offset (see SM_OFFSET_BITS in space_map.h). We then use that
* to calculate the maximum address that can be described by a
* space map entry for the given device.
*/
uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
if (shift >= 63) /* detect potential overflow */
return (B_TRUE);
return (vd->vdev_asize < (1ULL << shift));
}
/*
* Get statistics for the given vdev.
*/
static void
vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
int t;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
if (!vd->vdev_ops->vdev_op_leaf) {
if (vs) {
memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
}
if (vsx)
memset(vsx, 0, sizeof (*vsx));
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
vdev_get_stats_ex_impl(cvd, cvs, cvsx);
if (vs)
vdev_get_child_stat(cvd, vs, cvs);
if (vsx)
vdev_get_child_stat_ex(cvd, vsx, cvsx);
}
} else {
/*
* We're a leaf. Just copy our ZIO active queue stats in. The
* other leaf stats are updated in vdev_stat_update().
*/
if (!vsx)
return;
memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
vsx->vsx_active_queue[t] =
vd->vdev_queue.vq_class[t].vqc_active;
vsx->vsx_pend_queue[t] = avl_numnodes(
&vd->vdev_queue.vq_class[t].vqc_queued_tree);
}
}
}
void
vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
vdev_t *tvd = vd->vdev_top;
mutex_enter(&vd->vdev_stat_lock);
if (vs) {
bcopy(&vd->vdev_stat, vs, sizeof (*vs));
vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
vs->vs_state = vd->vdev_state;
vs->vs_rsize = vdev_get_min_asize(vd);
if (vd->vdev_ops->vdev_op_leaf) {
vs->vs_rsize += VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE;
/*
* Report initializing progress. Since we don't
* have the initializing locks held, this is only
* an estimate (although a fairly accurate one).
*/
vs->vs_initialize_bytes_done =
vd->vdev_initialize_bytes_done;
vs->vs_initialize_bytes_est =
vd->vdev_initialize_bytes_est;
vs->vs_initialize_state = vd->vdev_initialize_state;
vs->vs_initialize_action_time =
vd->vdev_initialize_action_time;
/*
* Report manual TRIM progress. Since we don't have
* the manual TRIM locks held, this is only an
* estimate (although a fairly accurate one).
*/
vs->vs_trim_notsup = !vd->vdev_has_trim;
vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
vs->vs_trim_state = vd->vdev_trim_state;
vs->vs_trim_action_time = vd->vdev_trim_action_time;
/* Set when there is a deferred resilver. */
vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
}
/*
* Report expandable space on top-level, non-auxiliary devices
* only. The expandable space is reported in terms of metaslab
* sized units since that determines how much space the pool
* can expand.
*/
if (vd->vdev_aux == NULL && tvd != NULL) {
vs->vs_esize = P2ALIGN(
vd->vdev_max_asize - vd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
vs->vs_configured_ashift = vd->vdev_top != NULL
? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
vs->vs_logical_ashift = vd->vdev_logical_ashift;
vs->vs_physical_ashift = vd->vdev_physical_ashift;
/*
* Report fragmentation and rebuild progress for top-level,
* non-auxiliary, concrete devices.
*/
if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
vdev_is_concrete(vd)) {
/*
* The vdev fragmentation rating doesn't take into
* account the embedded slog metaslab (vdev_log_mg).
* Since it's only one metaslab, it would have a tiny
* impact on the overall fragmentation.
*/
vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
vd->vdev_mg->mg_fragmentation : 0;
}
}
vdev_get_stats_ex_impl(vd, vs, vsx);
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
return (vdev_get_stats_ex(vd, vs, NULL));
}
void
vdev_clear_stats(vdev_t *vd)
{
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_space = 0;
vd->vdev_stat.vs_dspace = 0;
vd->vdev_stat.vs_alloc = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_scan_stat_init(vdev_t *vd)
{
vdev_stat_t *vs = &vd->vdev_stat;
for (int c = 0; c < vd->vdev_children; c++)
vdev_scan_stat_init(vd->vdev_child[c]);
mutex_enter(&vd->vdev_stat_lock);
vs->vs_scan_processed = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
spa_t *spa = zio->io_spa;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
vdev_t *pvd;
uint64_t txg = zio->io_txg;
vdev_stat_t *vs = &vd->vdev_stat;
vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
/*
* If this i/o is a gang leader, it didn't do any actual work.
*/
if (zio->io_gang_tree)
return;
if (zio->io_error == 0) {
/*
* If this is a root i/o, don't count it -- we've already
* counted the top-level vdevs, and vdev_get_stats() will
* aggregate them when asked. This reduces contention on
* the root vdev_stat_lock and implicitly handles blocks
* that compress away to holes, for which there is no i/o.
* (Holes never create vdev children, so all the counters
* remain zero, which is what we want.)
*
* Note: this only applies to successful i/o (io_error == 0)
* because unlike i/o counts, errors are not additive.
* When reading a ditto block, for example, failure of
* one top-level vdev does not imply a root-level error.
*/
if (vd == rvd)
return;
ASSERT(vd == zio->io_vd);
if (flags & ZIO_FLAG_IO_BYPASS)
return;
mutex_enter(&vd->vdev_stat_lock);
if (flags & ZIO_FLAG_IO_REPAIR) {
/*
* Repair is the result of a resilver issued by the
* scan thread (spa_sync).
*/
if (flags & ZIO_FLAG_SCAN_THREAD) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scn_phys = &scn->scn_phys;
uint64_t *processed = &scn_phys->scn_processed;
if (vd->vdev_ops->vdev_op_leaf)
atomic_add_64(processed, psize);
vs->vs_scan_processed += psize;
}
/*
* Repair is the result of a rebuild issued by the
* rebuild thread (vdev_rebuild_thread). To avoid
* double counting repaired bytes, the virtual dRAID
* spare vdev is excluded from the processed bytes.
*/
if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
vdev_t *tvd = vd->vdev_top;
vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
atomic_add_64(rebuilt, psize);
}
vs->vs_rebuild_processed += psize;
}
if (flags & ZIO_FLAG_SELF_HEAL)
vs->vs_self_healed += psize;
}
/*
* The bytes/ops/histograms are recorded at the leaf level and
* aggregated into the higher level vdevs in vdev_get_stats().
*/
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
zio_type_t vs_type = type;
zio_priority_t priority = zio->io_priority;
/*
* TRIM ops and bytes are reported to user space as
* ZIO_TYPE_IOCTL. This is done to preserve the
* vdev_stat_t structure layout for user space.
*/
if (type == ZIO_TYPE_TRIM)
vs_type = ZIO_TYPE_IOCTL;
/*
* Solely for the purposes of 'zpool iostat -lqrw'
* reporting, use the priority to categorize the IO.
* Only the following are reported to user space:
*
* ZIO_PRIORITY_SYNC_READ,
* ZIO_PRIORITY_SYNC_WRITE,
* ZIO_PRIORITY_ASYNC_READ,
* ZIO_PRIORITY_ASYNC_WRITE,
* ZIO_PRIORITY_SCRUB,
* ZIO_PRIORITY_TRIM.
*/
if (priority == ZIO_PRIORITY_REBUILD) {
priority = ((type == ZIO_TYPE_WRITE) ?
ZIO_PRIORITY_ASYNC_WRITE :
ZIO_PRIORITY_SCRUB);
} else if (priority == ZIO_PRIORITY_INITIALIZING) {
ASSERT3U(type, ==, ZIO_TYPE_WRITE);
priority = ZIO_PRIORITY_ASYNC_WRITE;
} else if (priority == ZIO_PRIORITY_REMOVAL) {
priority = ((type == ZIO_TYPE_WRITE) ?
ZIO_PRIORITY_ASYNC_WRITE :
ZIO_PRIORITY_ASYNC_READ);
}
vs->vs_ops[vs_type]++;
vs->vs_bytes[vs_type] += psize;
if (flags & ZIO_FLAG_DELEGATED) {
vsx->vsx_agg_histo[priority]
[RQ_HISTO(zio->io_size)]++;
} else {
vsx->vsx_ind_histo[priority]
[RQ_HISTO(zio->io_size)]++;
}
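/*
 * Latency accounting: io_delay is the device-level service time and
 * io_delta the total time since the zio was created; their
 * difference approximates the time spent queued.
 */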
if (zio->io_delta && zio->io_delay) {
vsx->vsx_queue_histo[priority]
[L_HISTO(zio->io_delta - zio->io_delay)]++;
vsx->vsx_disk_histo[type]
[L_HISTO(zio->io_delay)]++;
vsx->vsx_total_histo[type]
[L_HISTO(zio->io_delta)]++;
}
}
mutex_exit(&vd->vdev_stat_lock);
return;
}
if (flags & ZIO_FLAG_SPECULATIVE)
return;
/*
* If this is an I/O error that is going to be retried, then ignore the
* error. Otherwise, the user may interpret B_FAILFAST I/O errors as
* hard errors, when in reality they can happen for any number of
* innocuous reasons (bus resets, MPxIO link failure, etc).
*/
if (zio->io_error == EIO &&
!(zio->io_flags & ZIO_FLAG_IO_RETRY))
return;
/*
* Intent log writes won't propagate their error to the root
* I/O so don't mark these types of failures as pool-level
* errors.
*/
if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
return;
if (type == ZIO_TYPE_WRITE && txg != 0 &&
(!(flags & ZIO_FLAG_IO_REPAIR) ||
(flags & ZIO_FLAG_SCAN_THREAD) ||
spa->spa_claiming)) {
/*
* This is either a normal write (not a repair), or it's
* a repair induced by the scrub thread, or it's a repair
* made by zil_claim() during spa_load() in the first txg.
* In the normal case, we commit the DTL change in the same
* txg as the block was born. In the scrub-induced repair
* case, we know that scrubs run in first-pass syncing context,
* so we commit the DTL change in spa_syncing_txg(spa).
* In the zil_claim() case, we commit in spa_first_txg(spa).
*
* We currently do not make DTL entries for failed spontaneous
* self-healing writes triggered by normal (non-scrubbing)
* reads, because we have no transactional context in which to
* do so -- and it's not clear that it'd be desirable anyway.
*/
if (vd->vdev_ops->vdev_op_leaf) {
uint64_t commit_txg = txg;
if (flags & ZIO_FLAG_SCAN_THREAD) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
ASSERT(spa_sync_pass(spa) == 1);
vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
commit_txg = spa_syncing_txg(spa);
} else if (spa->spa_claiming) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
commit_txg = spa_first_txg(spa);
}
ASSERT(commit_txg >= spa_syncing_txg(spa));
if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
return;
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
}
if (vd != rvd)
vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
}
}
int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
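/*
 * Convert the raw (asize) space delta into deflated (dspace) units
 * using the per-512-byte-block deflate ratio computed in
 * vdev_set_deflate_ratio().
 */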
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
}
/*
* Update the in-core space usage stats for this vdev, its metaslab class,
* and the root vdev.
*/
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
int64_t space_delta)
{
int64_t dspace_delta;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(vd == vd->vdev_top);
/*
* Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
* factor. We must calculate this here and not at the root vdev
* because the root vdev's psize-to-asize is simply the max of its
* children's, thus not accurate enough for us.
*/
dspace_delta = vdev_deflated_space(vd, space_delta);
mutex_enter(&vd->vdev_stat_lock);
/* ensure we won't underflow */
if (alloc_delta < 0) {
ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
}
vd->vdev_stat.vs_alloc += alloc_delta;
vd->vdev_stat.vs_space += space_delta;
vd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&vd->vdev_stat_lock);
/* every class but log contributes to root space stats */
if (vd->vdev_mg != NULL && !vd->vdev_islog) {
ASSERT(!vd->vdev_isl2cache);
mutex_enter(&rvd->vdev_stat_lock);
rvd->vdev_stat.vs_alloc += alloc_delta;
rvd->vdev_stat.vs_space += space_delta;
rvd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&rvd->vdev_stat_lock);
}
/* Note: metaslab_class_space_update moved to metaslab_space_update */
}
/*
* Mark a top-level vdev's config as dirty, placing it on the dirty list
* so that it will be written out next time the vdev configuration is synced.
* If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
*/
void
vdev_config_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int c;
ASSERT(spa_writeable(spa));
/*
* If this is an aux vdev (as with l2cache and spare devices), then we
* update the vdev config manually and set the sync flag.
*/
if (vd->vdev_aux != NULL) {
spa_aux_vdev_t *sav = vd->vdev_aux;
nvlist_t **aux;
uint_t naux;
for (c = 0; c < sav->sav_count; c++) {
if (sav->sav_vdevs[c] == vd)
break;
}
if (c == sav->sav_count) {
/*
* We're being removed. There's nothing more to do.
*/
ASSERT(sav->sav_sync == B_TRUE);
return;
}
sav->sav_sync = B_TRUE;
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
}
ASSERT(c < naux);
/*
* Setting the nvlist in the middle of the array is a little
* sketchy, but it will work.
*/
nvlist_free(aux[c]);
aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
return;
}
/*
* The dirty list is protected by the SCL_CONFIG lock. The caller
* must either hold SCL_CONFIG as writer, or must be the sync thread
* (which holds SCL_CONFIG as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
if (vd == rvd) {
for (c = 0; c < rvd->vdev_children; c++)
vdev_config_dirty(rvd->vdev_child[c]);
} else {
ASSERT(vd == vd->vdev_top);
if (!list_link_active(&vd->vdev_config_dirty_node) &&
vdev_is_concrete(vd)) {
list_insert_head(&spa->spa_config_dirty_list, vd);
}
}
}
void
vdev_config_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
ASSERT(list_link_active(&vd->vdev_config_dirty_node));
list_remove(&spa->spa_config_dirty_list, vd);
}
/*
* Mark a top-level vdev's state as dirty, so that the next pass of
* spa_sync() can convert this into vdev_config_dirty(). We distinguish
* the state changes from larger config changes because they require
* much less locking, and are often needed for administrative actions.
*/
void
vdev_state_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_writeable(spa));
ASSERT(vd == vd->vdev_top);
/*
* The state list is protected by the SCL_STATE lock. The caller
* must either hold SCL_STATE as writer, or must be the sync thread
* (which holds SCL_STATE as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
if (!list_link_active(&vd->vdev_state_dirty_node) &&
vdev_is_concrete(vd))
list_insert_head(&spa->spa_state_dirty_list, vd);
}
void
vdev_state_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
ASSERT(list_link_active(&vd->vdev_state_dirty_node));
list_remove(&spa->spa_state_dirty_list, vd);
}
/*
* Propagate vdev state up from children to parent.
*/
void
vdev_propagate_state(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int degraded = 0, faulted = 0;
int corrupted = 0;
vdev_t *child;
if (vd->vdev_children > 0) {
for (int c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c];
/*
* Don't factor holes or indirect vdevs into the
* decision.
*/
if (!vdev_is_concrete(child))
continue;
if (!vdev_readable(child) ||
(!vdev_writeable(child) && spa_writeable(spa))) {
/*
* Root special: if there is a top-level log
* device, treat the root vdev as if it were
* degraded.
*/
if (child->vdev_islog && vd == rvd)
degraded++;
else
faulted++;
} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
degraded++;
}
if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
corrupted++;
}
vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
/*
* Root special: if there is a top-level vdev that cannot be
* opened due to corrupted metadata, then propagate the root
* vdev's aux state as 'corrupt' rather than 'insufficient
* replicas'.
*/
if (corrupted && vd == rvd &&
rvd->vdev_state == VDEV_STATE_CANT_OPEN)
vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
}
if (vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
/*
* Set a vdev's state. If this is during an open, we don't update the parent
* state, because we're in the process of opening children depth-first.
* Otherwise, we propagate the change to the parent.
*
* If this routine places a device in a faulted state, an appropriate ereport is
* generated.
*/
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
uint64_t save_state;
spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
/*
* Since the vdev_offline() code path is already in an offline
* state, we can miss a statechange event to OFFLINE. Check
* the previous state to catch this condition.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(state == VDEV_STATE_OFFLINE) &&
(vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
/* post an offline state change */
zfs_post_state_change(spa, vd, vd->vdev_prevstate);
}
vd->vdev_stat.vs_aux = aux;
return;
}
save_state = vd->vdev_state;
vd->vdev_state = state;
vd->vdev_stat.vs_aux = aux;
/*
* If we are setting the vdev state to anything but an open state, then
* always close the underlying device unless the device has requested
* a delayed close (i.e. we're about to remove or fault the device).
* Otherwise, we keep accessible but invalid devices open forever.
* We don't call vdev_close() itself, because that implies some extra
* checks (offline, etc) that we don't want here. This is limited to
* leaf devices, because otherwise closing the device will affect other
* children.
*/
if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
(aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
/*
* If the previous state is set to VDEV_STATE_REMOVED, then this
* device was previously marked removed and someone attempted to
* reopen it. If this failed due to a nonexistent device, then
* keep the device in the REMOVED state. The same applies if
* it is one of our special test online cases, which are only
* attempting to online the device and shouldn't generate an FMA
* fault.
*/
vd->vdev_state = VDEV_STATE_REMOVED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
} else if (state == VDEV_STATE_REMOVED) {
vd->vdev_removed = B_TRUE;
} else if (state == VDEV_STATE_CANT_OPEN) {
/*
* If we fail to open a vdev during an import or recovery, we
* mark it as "not available", which signifies that it was
* never there to begin with. Failure to open such a device
* is not considered an error.
*/
if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_not_present = 1;
/*
* Post the appropriate ereport. If the 'prevstate' field is
* set to something other than VDEV_STATE_UNKNOWN, it indicates
* that this is part of a vdev_reopen(). In this case, we don't
* want to post the ereport if the device was already in the
* CANT_OPEN state beforehand.
*
* If the 'checkremove' flag is set, then this is an attempt to
* online the device in response to an insertion event. If we
* hit this case, then we have detected an insertion event for a
* faulted or offline device that wasn't in the removed state.
* In this scenario, we don't post an ereport because we are
* about to replace the device, or attempt an online with
* vdev_forcefault, which will generate the fault for us.
*/
if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
!vd->vdev_not_present && !vd->vdev_checkremove &&
vd != spa->spa_root_vdev) {
const char *class;
switch (aux) {
case VDEV_AUX_OPEN_FAILED:
class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
break;
case VDEV_AUX_CORRUPT_DATA:
class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
break;
case VDEV_AUX_NO_REPLICAS:
class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
break;
case VDEV_AUX_BAD_GUID_SUM:
class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
break;
case VDEV_AUX_TOO_SMALL:
class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
break;
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
case VDEV_AUX_BAD_ASHIFT:
class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
(void) zfs_ereport_post(class, spa, vd, NULL, NULL,
save_state);
}
/* Erase any notion of persistent removed state */
vd->vdev_removed = B_FALSE;
} else {
vd->vdev_removed = B_FALSE;
}
/*
* Notify ZED of any significant state-change on a leaf vdev.
*/
if (vd->vdev_ops->vdev_op_leaf) {
/* preserve original state from a vdev_reopen() */
if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
(vd->vdev_prevstate != vd->vdev_state) &&
(save_state <= VDEV_STATE_CLOSED))
save_state = vd->vdev_prevstate;
/* filter out state change due to initial vdev_open */
if (save_state > VDEV_STATE_CLOSED)
zfs_post_state_change(spa, vd, save_state);
}
if (!isopen && vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
boolean_t
vdev_children_are_offline(vdev_t *vd)
{
ASSERT(!vd->vdev_ops->vdev_op_leaf);
for (uint64_t i = 0; i < vd->vdev_children; i++) {
if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Check the vdev configuration to ensure that it's capable of supporting
* a root pool. We do not support partial configuration.
*/
boolean_t
vdev_is_bootable(vdev_t *vd)
{
if (!vd->vdev_ops->vdev_op_leaf) {
const char *vdev_type = vd->vdev_ops->vdev_op_type;
if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
return (B_FALSE);
}
for (int c = 0; c < vd->vdev_children; c++) {
if (!vdev_is_bootable(vd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
vdev_is_concrete(vdev_t *vd)
{
vdev_ops_t *ops = vd->vdev_ops;
if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
ops == &vdev_missing_ops || ops == &vdev_root_ops) {
return (B_FALSE);
} else {
return (B_TRUE);
}
}
/*
* Determine if a log device has valid content. If the vdev was
* removed or faulted in the MOS config then we know that
* the content on the log device has already been written to the pool.
*/
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
!vd->vdev_removed)
return (B_TRUE);
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_log_state_valid(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Expand a vdev if possible.
*/
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
ASSERT(vd->vdev_top == vd);
ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(vdev_is_concrete(vd));
vdev_set_deflate_ratio(vd);
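/*
 * Only create additional metaslabs when the expanded asize has room
 * for at least one more full metaslab.
 */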
if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
VERIFY(vdev_metaslab_init(vd, txg) == 0);
vdev_config_dirty(vd);
}
}
/*
* Split a vdev.
*/
void
vdev_split(vdev_t *vd)
{
vdev_t *cvd, *pvd = vd->vdev_parent;
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
cvd = pvd->vdev_child[0];
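/*
 * If the parent is left with a single child, collapse the now
 * redundant parent and mark the remaining child as splitting.
 */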
if (pvd->vdev_children == 1) {
vdev_remove_parent(cvd);
cvd->vdev_splitting = B_TRUE;
}
vdev_propagate_state(cvd);
}
void
vdev_deadman(vdev_t *vd, char *tag)
{
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_deadman(cvd, tag);
}
if (vd->vdev_ops->vdev_op_leaf) {
vdev_queue_t *vq = &vd->vdev_queue;
mutex_enter(&vq->vq_lock);
if (avl_numnodes(&vq->vq_active_tree) > 0) {
spa_t *spa = vd->vdev_spa;
zio_t *fio;
uint64_t delta;
zfs_dbgmsg("slow vdev: %s has %lu active IOs",
vd->vdev_path, avl_numnodes(&vq->vq_active_tree));
/*
* Look at the head of all the pending queues;
* if any I/O has been outstanding for longer than
* spa_deadman_synctime, invoke the deadman logic.
*/
fio = avl_first(&vq->vq_active_tree);
delta = gethrtime() - fio->io_timestamp;
if (delta > spa_deadman_synctime(spa))
zio_deadman(fio, tag);
}
mutex_exit(&vq->vq_lock);
}
}
void
vdev_defer_resilver(vdev_t *vd)
{
ASSERT(vd->vdev_ops->vdev_op_leaf);
vd->vdev_resilver_deferred = B_TRUE;
vd->vdev_spa->spa_resilver_deferred = B_TRUE;
}
/*
* Clears the resilver deferred flag on all leaf devs under vd. Returns
* B_TRUE if we have devices that need to be resilvered and are available to
* accept resilver I/Os.
*/
boolean_t
vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
{
boolean_t resilver_needed = B_FALSE;
spa_t *spa = vd->vdev_spa;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
}
if (vd == spa->spa_root_vdev &&
spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
vdev_config_dirty(vd);
spa->spa_resilver_deferred = B_FALSE;
return (resilver_needed);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (resilver_needed);
vd->vdev_resilver_deferred = B_FALSE;
return (!vdev_is_dead(vd) && !vd->vdev_offline &&
vdev_resilver_needed(vd, NULL, NULL));
}
boolean_t
vdev_xlate_is_empty(range_seg64_t *rs)
{
return (rs->rs_start == rs->rs_end);
}
/*
* Translate a logical range to the first contiguous physical range for the
* specified vdev_t. This function is initially called with a leaf vdev and
* will walk each parent vdev until it reaches a top-level vdev. Once the
* top-level is reached the physical range is initialized and the recursive
* function begins to unwind. As it unwinds it calls the parent's vdev
* specific translation function to do the real conversion.
*/
void
vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
/*
* Walk up the vdev tree
*/
if (vd != vd->vdev_top) {
vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
remain_rs);
} else {
/*
* We've reached the top-level vdev; initialize the physical
* range to the logical range, set an empty remaining
* range, and then start to unwind.
*/
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
remain_rs->rs_start = logical_rs->rs_start;
remain_rs->rs_end = logical_rs->rs_start;
return;
}
vdev_t *pvd = vd->vdev_parent;
ASSERT3P(pvd, !=, NULL);
ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
/*
* As this recursive function unwinds, translate the logical
* range into its physical and any remaining components by calling
* the vdev specific translate function.
*/
range_seg64_t intermediate = { 0 };
pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
physical_rs->rs_start = intermediate.rs_start;
physical_rs->rs_end = intermediate.rs_end;
}
void
vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg)
{
range_seg64_t iter_rs = *logical_rs;
range_seg64_t physical_rs;
range_seg64_t remain_rs;
while (!vdev_xlate_is_empty(&iter_rs)) {
vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
/*
* With raidz and dRAID, it's possible that the logical range
* does not live on this leaf vdev. Call the provided function
* only when the translated physical range is non-empty.
*/
if (!vdev_xlate_is_empty(&physical_rs))
func(arg, &physical_rs);
iter_rs = remain_rs;
}
}
/*
* Look at the vdev tree and determine whether any devices are currently being
* replaced.
*/
boolean_t
vdev_replace_in_progress(vdev_t *vdev)
{
ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
if (vdev->vdev_ops == &vdev_replacing_ops)
return (B_TRUE);
/*
* A 'spare' vdev indicates that we have a replace in progress, unless
* it has exactly two children, and the second, the hot spare, has
* finished being resilvered.
*/
if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
!vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
return (B_TRUE);
for (int i = 0; i < vdev->vdev_children; i++) {
if (vdev_replace_in_progress(vdev->vdev_child[i]))
return (B_TRUE);
}
return (B_FALSE);
}
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, INT, ZMOD_RW,
"Target number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, INT, ZMOD_RW,
"Default limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, INT, ZMOD_RW,
"Minimum number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, INT, ZMOD_RW,
"Practical upper limit of total metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
"Rate limit slow IO (delay) events to this many per second");
ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
"Rate limit checksum events to this many checksum errors per second "
"(do not set below zed threshold).");
ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
"Ignore errors during resilver/scrub");
ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
"Bypass vdev_validate()");
ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
"Disable cache flushes");
ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, INT, ZMOD_RW,
"Minimum number of metaslabs required to dedicate one for log blocks");
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
param_set_min_auto_ashift, param_get_ulong, ZMOD_RW,
"Minimum ashift used when creating new top-level vdevs");
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
param_set_max_auto_ashift, param_get_ulong, ZMOD_RW,
"Maximum ashift used when optimizing for logical -> physical sector "
"size on new top-level vdevs");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz_math.c b/sys/contrib/openzfs/module/zfs/vdev_raidz_math.c
index 25d76970e99a..138b7dac5956 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz_math.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz_math.c
@@ -1,666 +1,666 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Gvozden Nešković. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/debug.h>
#include <sys/zfs_debug.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>
#include <sys/simd.h>
/* Opaque implementation with NULL methods to represent original methods */
static const raidz_impl_ops_t vdev_raidz_original_impl = {
.name = "original",
.is_supported = raidz_will_scalar_work,
};
/* RAIDZ parity op that contains the fastest methods */
static raidz_impl_ops_t vdev_raidz_fastest_impl = {
.name = "fastest"
};
/* All compiled in implementations */
const raidz_impl_ops_t *raidz_all_maths[] = {
&vdev_raidz_original_impl,
&vdev_raidz_scalar_impl,
#if defined(__x86_64) && defined(HAVE_SSE2) /* only x86_64 for now */
&vdev_raidz_sse2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_SSSE3) /* only x86_64 for now */
&vdev_raidz_ssse3_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX2) /* only x86_64 for now */
&vdev_raidz_avx2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F) /* only x86_64 for now */
&vdev_raidz_avx512f_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512BW) /* only x86_64 for now */
&vdev_raidz_avx512bw_impl,
#endif
#if defined(__aarch64__) && !defined(__FreeBSD__)
&vdev_raidz_aarch64_neon_impl,
&vdev_raidz_aarch64_neonx2_impl,
#endif
#if defined(__powerpc__) && defined(__altivec__)
&vdev_raidz_powerpc_altivec_impl,
#endif
};
/* Indicate that benchmark has been completed */
static boolean_t raidz_math_initialized = B_FALSE;
/* Select raidz implementation */
#define IMPL_FASTEST (UINT32_MAX)
#define IMPL_CYCLE (UINT32_MAX - 1)
#define IMPL_ORIGINAL (0)
#define IMPL_SCALAR (1)
#define RAIDZ_IMPL_READ(i) (*(volatile uint32_t *) &(i))
static uint32_t zfs_vdev_raidz_impl = IMPL_SCALAR;
static uint32_t user_sel_impl = IMPL_FASTEST;
/* Hold all supported implementations */
static size_t raidz_supp_impl_cnt = 0;
static raidz_impl_ops_t *raidz_supp_impl[ARRAY_SIZE(raidz_all_maths)];
#if defined(_KERNEL)
/*
* kstat values for supported implementations.
* Values represent per-disk throughput of an 8 disk + parity raidz vdev [B/s]
*/
static raidz_impl_kstat_t raidz_impl_kstats[ARRAY_SIZE(raidz_all_maths) + 1];
/* kstat for benchmarked implementations */
static kstat_t *raidz_math_kstat = NULL;
#endif
/*
* Returns the RAIDZ operations for raidz_map() parity calculations. When
* a SIMD implementation is not allowed in the current context, fall back
* to the fastest generic implementation.
*/
const raidz_impl_ops_t *
vdev_raidz_math_get_ops(void)
{
if (!kfpu_allowed())
return (&vdev_raidz_scalar_impl);
raidz_impl_ops_t *ops = NULL;
const uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl);
switch (impl) {
case IMPL_FASTEST:
ASSERT(raidz_math_initialized);
ops = &vdev_raidz_fastest_impl;
break;
case IMPL_CYCLE:
/* Cycle through all supported implementations */
ASSERT(raidz_math_initialized);
ASSERT3U(raidz_supp_impl_cnt, >, 0);
static size_t cycle_impl_idx = 0;
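/*
 * cycle_impl_idx is a plain static counter with no locking;
 * concurrent callers may occasionally pick the same implementation,
 * which only affects which method gets exercised next.
 */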
size_t idx = (++cycle_impl_idx) % raidz_supp_impl_cnt;
ops = raidz_supp_impl[idx];
break;
case IMPL_ORIGINAL:
ops = (raidz_impl_ops_t *)&vdev_raidz_original_impl;
break;
case IMPL_SCALAR:
ops = (raidz_impl_ops_t *)&vdev_raidz_scalar_impl;
break;
default:
ASSERT3U(impl, <, raidz_supp_impl_cnt);
ASSERT3U(raidz_supp_impl_cnt, >, 0);
if (impl < ARRAY_SIZE(raidz_all_maths))
ops = raidz_supp_impl[impl];
break;
}
ASSERT3P(ops, !=, NULL);
return (ops);
}
/*
* Select parity generation method for raidz_map
*/
int
vdev_raidz_math_generate(raidz_map_t *rm, raidz_row_t *rr)
{
raidz_gen_f gen_parity = NULL;
switch (raidz_parity(rm)) {
case 1:
gen_parity = rm->rm_ops->gen[RAIDZ_GEN_P];
break;
case 2:
gen_parity = rm->rm_ops->gen[RAIDZ_GEN_PQ];
break;
case 3:
gen_parity = rm->rm_ops->gen[RAIDZ_GEN_PQR];
break;
default:
gen_parity = NULL;
- cmn_err(CE_PANIC, "invalid RAID-Z configuration %d",
- raidz_parity(rm));
+ cmn_err(CE_PANIC, "invalid RAID-Z configuration %llu",
+ (u_longlong_t)raidz_parity(rm));
break;
}
/* if method is NULL execute the original implementation */
if (gen_parity == NULL)
return (RAIDZ_ORIGINAL_IMPL);
gen_parity(rr);
return (0);
}
static raidz_rec_f
reconstruct_fun_p_sel(raidz_map_t *rm, const int *parity_valid,
const int nbaddata)
{
if (nbaddata == 1 && parity_valid[CODE_P]) {
return (rm->rm_ops->rec[RAIDZ_REC_P]);
}
return ((raidz_rec_f) NULL);
}
static raidz_rec_f
reconstruct_fun_pq_sel(raidz_map_t *rm, const int *parity_valid,
const int nbaddata)
{
if (nbaddata == 1) {
if (parity_valid[CODE_P]) {
return (rm->rm_ops->rec[RAIDZ_REC_P]);
} else if (parity_valid[CODE_Q]) {
return (rm->rm_ops->rec[RAIDZ_REC_Q]);
}
} else if (nbaddata == 2 &&
parity_valid[CODE_P] && parity_valid[CODE_Q]) {
return (rm->rm_ops->rec[RAIDZ_REC_PQ]);
}
return ((raidz_rec_f) NULL);
}
static raidz_rec_f
reconstruct_fun_pqr_sel(raidz_map_t *rm, const int *parity_valid,
const int nbaddata)
{
if (nbaddata == 1) {
if (parity_valid[CODE_P]) {
return (rm->rm_ops->rec[RAIDZ_REC_P]);
} else if (parity_valid[CODE_Q]) {
return (rm->rm_ops->rec[RAIDZ_REC_Q]);
} else if (parity_valid[CODE_R]) {
return (rm->rm_ops->rec[RAIDZ_REC_R]);
}
} else if (nbaddata == 2) {
if (parity_valid[CODE_P] && parity_valid[CODE_Q]) {
return (rm->rm_ops->rec[RAIDZ_REC_PQ]);
} else if (parity_valid[CODE_P] && parity_valid[CODE_R]) {
return (rm->rm_ops->rec[RAIDZ_REC_PR]);
} else if (parity_valid[CODE_Q] && parity_valid[CODE_R]) {
return (rm->rm_ops->rec[RAIDZ_REC_QR]);
}
} else if (nbaddata == 3 &&
parity_valid[CODE_P] && parity_valid[CODE_Q] &&
parity_valid[CODE_R]) {
return (rm->rm_ops->rec[RAIDZ_REC_PQR]);
}
return ((raidz_rec_f) NULL);
}
/*
* Select data reconstruction method for raidz_map
* @parity_valid - Parity validity flag
* @dt - Failed data index array
* @nbaddata - Number of failed data columns
*/
int
vdev_raidz_math_reconstruct(raidz_map_t *rm, raidz_row_t *rr,
const int *parity_valid, const int *dt, const int nbaddata)
{
raidz_rec_f rec_fn = NULL;
switch (raidz_parity(rm)) {
case PARITY_P:
rec_fn = reconstruct_fun_p_sel(rm, parity_valid, nbaddata);
break;
case PARITY_PQ:
rec_fn = reconstruct_fun_pq_sel(rm, parity_valid, nbaddata);
break;
case PARITY_PQR:
rec_fn = reconstruct_fun_pqr_sel(rm, parity_valid, nbaddata);
break;
default:
- cmn_err(CE_PANIC, "invalid RAID-Z configuration %d",
- raidz_parity(rm));
+ cmn_err(CE_PANIC, "invalid RAID-Z configuration %llu",
+ (u_longlong_t)raidz_parity(rm));
break;
}
if (rec_fn == NULL)
return (RAIDZ_ORIGINAL_IMPL);
else
return (rec_fn(rr, dt));
}
const char *raidz_gen_name[] = {
"gen_p", "gen_pq", "gen_pqr"
};
const char *raidz_rec_name[] = {
"rec_p", "rec_q", "rec_r",
"rec_pq", "rec_pr", "rec_qr", "rec_pqr"
};
#if defined(_KERNEL)
#define RAIDZ_KSTAT_LINE_LEN (17 + 10*12 + 1)
static int
raidz_math_kstat_headers(char *buf, size_t size)
{
int i;
ssize_t off;
ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN);
off = snprintf(buf, size, "%-17s", "implementation");
for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++)
off += snprintf(buf + off, size - off, "%-16s",
raidz_gen_name[i]);
for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++)
off += snprintf(buf + off, size - off, "%-16s",
raidz_rec_name[i]);
(void) snprintf(buf + off, size - off, "\n");
return (0);
}
static int
raidz_math_kstat_data(char *buf, size_t size, void *data)
{
raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
raidz_impl_kstat_t *cstat = (raidz_impl_kstat_t *)data;
ssize_t off = 0;
int i;
ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN);
if (cstat == fstat) {
off += snprintf(buf + off, size - off, "%-17s", "fastest");
for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++) {
int id = fstat->gen[i];
off += snprintf(buf + off, size - off, "%-16s",
raidz_supp_impl[id]->name);
}
for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++) {
int id = fstat->rec[i];
off += snprintf(buf + off, size - off, "%-16s",
raidz_supp_impl[id]->name);
}
} else {
ptrdiff_t id = cstat - raidz_impl_kstats;
off += snprintf(buf + off, size - off, "%-17s",
raidz_supp_impl[id]->name);
for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++)
off += snprintf(buf + off, size - off, "%-16llu",
(u_longlong_t)cstat->gen[i]);
for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++)
off += snprintf(buf + off, size - off, "%-16llu",
(u_longlong_t)cstat->rec[i]);
}
(void) snprintf(buf + off, size - off, "\n");
return (0);
}
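/*
 * Resolve a kstat row to its backing entry: indices 0 through
 * raidz_supp_impl_cnt - 1 are the per-implementation counters, and the
 * entry at raidz_supp_impl_cnt is the synthetic "fastest" selection.
 */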
static void *
raidz_math_kstat_addr(kstat_t *ksp, loff_t n)
{
if (n <= raidz_supp_impl_cnt)
ksp->ks_private = (void *) (raidz_impl_kstats + n);
else
ksp->ks_private = NULL;
return (ksp->ks_private);
}
#define BENCH_D_COLS (8ULL)
#define BENCH_COLS (BENCH_D_COLS + PARITY_PQR)
#define BENCH_ZIO_SIZE (1ULL << SPA_OLD_MAXBLOCKSHIFT) /* 128 kiB */
#define BENCH_NS MSEC2NSEC(1) /* 1ms */
typedef void (*benchmark_fn)(raidz_map_t *rm, const int fn);
static void
benchmark_gen_impl(raidz_map_t *rm, const int fn)
{
(void) fn;
vdev_raidz_generate_parity(rm);
}
static void
benchmark_rec_impl(raidz_map_t *rm, const int fn)
{
static const int rec_tgt[7][3] = {
{1, 2, 3}, /* rec_p: bad QR & D[0] */
{0, 2, 3}, /* rec_q: bad PR & D[0] */
{0, 1, 3}, /* rec_r: bad PQ & D[0] */
{2, 3, 4}, /* rec_pq: bad R & D[0][1] */
{1, 3, 4}, /* rec_pr: bad Q & D[0][1] */
{0, 3, 4}, /* rec_qr: bad P & D[0][1] */
{3, 4, 5} /* rec_pqr: bad & D[0][1][2] */
};
vdev_raidz_reconstruct(rm, rec_tgt[fn], 3);
}
/*
* Benchmarking of all supported implementations (raidz_supp_impl_cnt)
* is performed by setting the rm_ops pointer and calling the top level
* generate/reconstruct methods of bench_rm.
*/
static void
benchmark_raidz_impl(raidz_map_t *bench_rm, const int fn, benchmark_fn bench_fn)
{
uint64_t run_cnt, speed, best_speed = 0;
hrtime_t t_start, t_diff;
raidz_impl_ops_t *curr_impl;
raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
int impl, i;
for (impl = 0; impl < raidz_supp_impl_cnt; impl++) {
/* set an implementation to benchmark */
curr_impl = raidz_supp_impl[impl];
bench_rm->rm_ops = curr_impl;
run_cnt = 0;
t_start = gethrtime();
do {
for (i = 0; i < 5; i++, run_cnt++)
bench_fn(bench_rm, fn);
t_diff = gethrtime() - t_start;
} while (t_diff < BENCH_NS);
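/* Convert the measured iteration count into per-column throughput (bytes/s) */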
speed = run_cnt * BENCH_ZIO_SIZE * NANOSEC;
speed /= (t_diff * BENCH_COLS);
if (bench_fn == benchmark_gen_impl)
raidz_impl_kstats[impl].gen[fn] = speed;
else
raidz_impl_kstats[impl].rec[fn] = speed;
/* Update fastest implementation method */
if (speed > best_speed) {
best_speed = speed;
if (bench_fn == benchmark_gen_impl) {
fstat->gen[fn] = impl;
vdev_raidz_fastest_impl.gen[fn] =
curr_impl->gen[fn];
} else {
fstat->rec[fn] = impl;
vdev_raidz_fastest_impl.rec[fn] =
curr_impl->rec[fn];
}
}
}
}
#endif
/*
* Initialize and benchmark all supported implementations.
*/
static void
benchmark_raidz(void)
{
raidz_impl_ops_t *curr_impl;
int i, c;
/* Move supported impl into raidz_supp_impl */
for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];
if (curr_impl->init)
curr_impl->init();
if (curr_impl->is_supported())
raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
}
membar_producer(); /* complete raidz_supp_impl[] init */
raidz_supp_impl_cnt = c; /* number of supported impl */
#if defined(_KERNEL)
zio_t *bench_zio = NULL;
raidz_map_t *bench_rm = NULL;
uint64_t bench_parity;
/* Fake a zio and run the benchmark on a warmed up buffer */
bench_zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
bench_zio->io_offset = 0;
bench_zio->io_size = BENCH_ZIO_SIZE; /* only data columns */
bench_zio->io_abd = abd_alloc_linear(BENCH_ZIO_SIZE, B_TRUE);
memset(abd_to_buf(bench_zio->io_abd), 0xAA, BENCH_ZIO_SIZE);
/* Benchmark parity generation methods */
for (int fn = 0; fn < RAIDZ_GEN_NUM; fn++) {
bench_parity = fn + 1;
/* New raidz_map is needed for each generate_p/q/r */
bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
BENCH_D_COLS + bench_parity, bench_parity);
benchmark_raidz_impl(bench_rm, fn, benchmark_gen_impl);
vdev_raidz_map_free(bench_rm);
}
/* Benchmark data reconstruction methods */
bench_rm = vdev_raidz_map_alloc(bench_zio, SPA_MINBLOCKSHIFT,
BENCH_COLS, PARITY_PQR);
for (int fn = 0; fn < RAIDZ_REC_NUM; fn++)
benchmark_raidz_impl(bench_rm, fn, benchmark_rec_impl);
vdev_raidz_map_free(bench_rm);
/* cleanup the bench zio */
abd_free(bench_zio->io_abd);
kmem_free(bench_zio, sizeof (zio_t));
#else
/*
* Skip the benchmark in user space to avoid impacting libzpool
* consumers (zdb, zhack, zinject, ztest). The last implementation
* is assumed to be the fastest and used by default.
*/
memcpy(&vdev_raidz_fastest_impl,
raidz_supp_impl[raidz_supp_impl_cnt - 1],
sizeof (vdev_raidz_fastest_impl));
strcpy(vdev_raidz_fastest_impl.name, "fastest");
#endif /* _KERNEL */
}
void
vdev_raidz_math_init(void)
{
/* Determine the fastest available implementation. */
benchmark_raidz();
#if defined(_KERNEL)
/* Install kstats for all implementations */
raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench", "misc",
KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
if (raidz_math_kstat != NULL) {
raidz_math_kstat->ks_data = NULL;
raidz_math_kstat->ks_ndata = UINT32_MAX;
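/* Rows of the raw kstat are resolved on demand via raidz_math_kstat_addr() */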
kstat_set_raw_ops(raidz_math_kstat,
raidz_math_kstat_headers,
raidz_math_kstat_data,
raidz_math_kstat_addr);
kstat_install(raidz_math_kstat);
}
#endif
/* Finish initialization */
atomic_swap_32(&zfs_vdev_raidz_impl, user_sel_impl);
raidz_math_initialized = B_TRUE;
}
void
vdev_raidz_math_fini(void)
{
raidz_impl_ops_t const *curr_impl;
#if defined(_KERNEL)
if (raidz_math_kstat != NULL) {
kstat_delete(raidz_math_kstat);
raidz_math_kstat = NULL;
}
#endif
for (int i = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
curr_impl = raidz_all_maths[i];
if (curr_impl->fini)
curr_impl->fini();
}
}
static const struct {
char *name;
uint32_t sel;
} math_impl_opts[] = {
{ "cycle", IMPL_CYCLE },
{ "fastest", IMPL_FASTEST },
{ "original", IMPL_ORIGINAL },
{ "scalar", IMPL_SCALAR }
};
/*
* Function sets desired raidz implementation.
*
* If we are called before init(), the user preference is saved in
* user_sel_impl and applied in a later init() call. This occurs when the
* module parameter is specified on module load. Otherwise, update
* zfs_vdev_raidz_impl directly.
*
* @val Name of raidz implementation to use
* @param Unused.
*/
int
vdev_raidz_impl_set(const char *val)
{
int err = -EINVAL;
char req_name[RAIDZ_IMPL_NAME_MAX];
uint32_t impl = RAIDZ_IMPL_READ(user_sel_impl);
size_t i;
/* sanitize input */
i = strnlen(val, RAIDZ_IMPL_NAME_MAX);
if (i == 0 || i == RAIDZ_IMPL_NAME_MAX)
return (err);
strlcpy(req_name, val, RAIDZ_IMPL_NAME_MAX);
while (i > 0 && !!isspace(req_name[i-1]))
i--;
req_name[i] = '\0';
/* Check mandatory options */
for (i = 0; i < ARRAY_SIZE(math_impl_opts); i++) {
if (strcmp(req_name, math_impl_opts[i].name) == 0) {
impl = math_impl_opts[i].sel;
err = 0;
break;
}
}
/* check all supported impl if init() was already called */
if (err != 0 && raidz_math_initialized) {
/* check all supported implementations */
for (i = 0; i < raidz_supp_impl_cnt; i++) {
if (strcmp(req_name, raidz_supp_impl[i]->name) == 0) {
impl = i;
err = 0;
break;
}
}
}
if (err == 0) {
if (raidz_math_initialized)
atomic_swap_32(&zfs_vdev_raidz_impl, impl);
else
atomic_swap_32(&user_sel_impl, impl);
}
return (err);
}
#if defined(_KERNEL) && defined(__linux__)
static int
zfs_vdev_raidz_impl_set(const char *val, zfs_kernel_param_t *kp)
{
return (vdev_raidz_impl_set(val));
}
static int
zfs_vdev_raidz_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
int i, cnt = 0;
char *fmt;
const uint32_t impl = RAIDZ_IMPL_READ(zfs_vdev_raidz_impl);
ASSERT(raidz_math_initialized);
/* list mandatory options */
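/* ("original" and "scalar" are skipped here; they appear in the supported list below) */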
for (i = 0; i < ARRAY_SIZE(math_impl_opts) - 2; i++) {
fmt = (impl == math_impl_opts[i].sel) ? "[%s] " : "%s ";
cnt += sprintf(buffer + cnt, fmt, math_impl_opts[i].name);
}
/* list all supported implementations */
for (i = 0; i < raidz_supp_impl_cnt; i++) {
fmt = (i == impl) ? "[%s] " : "%s ";
cnt += sprintf(buffer + cnt, fmt, raidz_supp_impl[i]->name);
}
return (cnt);
}
module_param_call(zfs_vdev_raidz_impl, zfs_vdev_raidz_impl_set,
zfs_vdev_raidz_impl_get, NULL, 0644);
MODULE_PARM_DESC(zfs_vdev_raidz_impl, "Select raidz implementation.");
#endif
diff --git a/sys/contrib/openzfs/module/zfs/zfs_log.c b/sys/contrib/openzfs/module/zfs/zfs_log.c
index 0f330ec933aa..e248dc3cc4e8 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_log.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_log.c
@@ -1,791 +1,792 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018 by Delphix. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/vfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/byteorder.h>
#include <sys/policy.h>
#include <sys/stat.h>
#include <sys/acl.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/zfs_fuid.h>
#include <sys/dsl_dataset.h>
/*
* These zfs_log_* functions must be called within a dmu tx, in one
* of 2 contexts depending on zilog->z_replay:
*
* Non replay mode
* ---------------
* We need to record the transaction so that if it is committed to
* the Intent Log then it can be replayed. An intent log transaction
* structure (itx_t) is allocated and all the information necessary to
* possibly replay the transaction is saved in it. The itx is then assigned
* a sequence number and inserted in the in-memory list anchored in the zilog.
*
* Replay mode
* -----------
* We need to mark the intent log record as replayed in the log header.
* This is done in the same transaction as the replay so that they
* commit atomically.
*/
int
zfs_log_create_txtype(zil_create_t type, vsecattr_t *vsecp, vattr_t *vap)
{
int isxvattr = (vap->va_mask & ATTR_XVATTR);
switch (type) {
case Z_FILE:
if (vsecp == NULL && !isxvattr)
return (TX_CREATE);
if (vsecp && isxvattr)
return (TX_CREATE_ACL_ATTR);
if (vsecp)
return (TX_CREATE_ACL);
else
return (TX_CREATE_ATTR);
- /*NOTREACHED*/
case Z_DIR:
if (vsecp == NULL && !isxvattr)
return (TX_MKDIR);
if (vsecp && isxvattr)
return (TX_MKDIR_ACL_ATTR);
if (vsecp)
return (TX_MKDIR_ACL);
else
return (TX_MKDIR_ATTR);
case Z_XATTRDIR:
return (TX_MKXATTR);
}
ASSERT(0);
return (TX_MAX_TYPE);
}
/*
* Build up the log data necessary for logging an xvattr_t.
* First the lr_attr_t is initialized. Following the lr_attr_t
* is the mapsize and attribute bitmap copied from the xvattr_t.
* Following the bitmap is a single 64-bit word holding the attribute
* bits to set on replay, then two 64-bit words reserved for the
* create time, which may be set, and finally the scanstamp (or
* project ID) area.
*/
static void
zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
{
uint32_t *bitmap;
uint64_t *attrs;
uint64_t *crtime;
xoptattr_t *xoap;
void *scanstamp;
int i;
xoap = xva_getxoptattr(xvap);
ASSERT(xoap);
lrattr->lr_attr_masksize = xvap->xva_mapsize;
bitmap = &lrattr->lr_attr_bitmap;
for (i = 0; i != xvap->xva_mapsize; i++, bitmap++) {
*bitmap = xvap->xva_reqattrmap[i];
}
/* Now pack the attributes up in a single uint64_t */
attrs = (uint64_t *)bitmap;
+ *attrs = 0;
crtime = attrs + 1;
+ bzero(crtime, 2 * sizeof (uint64_t));
scanstamp = (caddr_t)(crtime + 2);
- *attrs = 0;
+ bzero(scanstamp, AV_SCANSTAMP_SZ);
if (XVA_ISSET_REQ(xvap, XAT_READONLY))
*attrs |= (xoap->xoa_readonly == 0) ? 0 :
XAT0_READONLY;
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
*attrs |= (xoap->xoa_hidden == 0) ? 0 :
XAT0_HIDDEN;
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
*attrs |= (xoap->xoa_system == 0) ? 0 :
XAT0_SYSTEM;
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
*attrs |= (xoap->xoa_archive == 0) ? 0 :
XAT0_ARCHIVE;
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
*attrs |= (xoap->xoa_immutable == 0) ? 0 :
XAT0_IMMUTABLE;
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
*attrs |= (xoap->xoa_nounlink == 0) ? 0 :
XAT0_NOUNLINK;
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
*attrs |= (xoap->xoa_appendonly == 0) ? 0 :
XAT0_APPENDONLY;
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
*attrs |= (xoap->xoa_opaque == 0) ? 0 :
XAT0_OPAQUE;
if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
*attrs |= (xoap->xoa_nodump == 0) ? 0 :
XAT0_NODUMP;
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
*attrs |= (xoap->xoa_av_quarantined == 0) ? 0 :
XAT0_AV_QUARANTINED;
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
*attrs |= (xoap->xoa_av_modified == 0) ? 0 :
XAT0_AV_MODIFIED;
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
ZFS_TIME_ENCODE(&xoap->xoa_createtime, crtime);
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
bcopy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ);
} else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
/*
* XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
* at the same time, so we can share the same space.
*/
bcopy(&xoap->xoa_projid, scanstamp, sizeof (uint64_t));
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
*attrs |= (xoap->xoa_reparse == 0) ? 0 :
XAT0_REPARSE;
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
*attrs |= (xoap->xoa_offline == 0) ? 0 :
XAT0_OFFLINE;
if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
*attrs |= (xoap->xoa_sparse == 0) ? 0 :
XAT0_SPARSE;
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
*attrs |= (xoap->xoa_projinherit == 0) ? 0 :
XAT0_PROJINHERIT;
}
static void *
zfs_log_fuid_ids(zfs_fuid_info_t *fuidp, void *start)
{
zfs_fuid_t *zfuid;
uint64_t *fuidloc = start;
/* First copy in the ACE FUIDs */
for (zfuid = list_head(&fuidp->z_fuids); zfuid;
zfuid = list_next(&fuidp->z_fuids, zfuid)) {
*fuidloc++ = zfuid->z_logfuid;
}
return (fuidloc);
}
static void *
zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start)
{
zfs_fuid_domain_t *zdomain;
/* now copy in the domain info, if any */
if (fuidp->z_domain_str_sz != 0) {
for (zdomain = list_head(&fuidp->z_domains); zdomain;
zdomain = list_next(&fuidp->z_domains, zdomain)) {
bcopy((void *)zdomain->z_domain, start,
strlen(zdomain->z_domain) + 1);
start = (caddr_t)start +
strlen(zdomain->z_domain) + 1;
}
}
return (start);
}
/*
* If zp is an xattr node, check whether the xattr owner is unlinked.
* We don't want to log anything if the owner is unlinked.
*/
static int
zfs_xattr_owner_unlinked(znode_t *zp)
{
int unlinked = 0;
znode_t *dzp;
#ifdef __FreeBSD__
znode_t *tzp = zp;
/*
* zrele drops the vnode lock which violates the VOP locking contract
* on FreeBSD. See comment at the top of zfs_replay.c for more detail.
*/
/*
* if zp is XATTR node, keep walking up via z_xattr_parent until we
* get the owner
*/
while (tzp->z_pflags & ZFS_XATTR) {
ASSERT3U(zp->z_xattr_parent, !=, 0);
if (zfs_zget(ZTOZSB(tzp), tzp->z_xattr_parent, &dzp) != 0) {
unlinked = 1;
break;
}
if (tzp != zp)
zrele(tzp);
tzp = dzp;
unlinked = tzp->z_unlinked;
}
if (tzp != zp)
zrele(tzp);
#else
zhold(zp);
/*
* if zp is XATTR node, keep walking up via z_xattr_parent until we
* get the owner
*/
while (zp->z_pflags & ZFS_XATTR) {
ASSERT3U(zp->z_xattr_parent, !=, 0);
if (zfs_zget(ZTOZSB(zp), zp->z_xattr_parent, &dzp) != 0) {
unlinked = 1;
break;
}
zrele(zp);
zp = dzp;
unlinked = zp->z_unlinked;
}
zrele(zp);
#endif
return (unlinked);
}
/*
* Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and
* TX_MKXATTR transactions.
*
* TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
* domain information appended prior to the name. In this case the
* uid/gid in the log record will be a log centric FUID.
*
* TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that
* may contain attributes, an ACL, and optional FUID information.
*
* TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify
* an ACL and normal users/groups in the ACEs.
*
* There may be optional xvattr attribute information, similar
* to zfs_log_setattr.
*
* Also, "domain" strings may be appended after the file name.
*/
void
zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, znode_t *zp, const char *name, vsecattr_t *vsecp,
zfs_fuid_info_t *fuidp, vattr_t *vap)
{
itx_t *itx;
lr_create_t *lr;
lr_acl_create_t *lracl;
size_t aclsize = 0;
size_t xvatsize = 0;
size_t txsize;
xvattr_t *xvap = (xvattr_t *)vap;
void *end;
size_t lrsize;
size_t namesize = strlen(name) + 1;
size_t fuidsz = 0;
if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
return;
/*
* If we have FUIDs present then add in space for
* domains and ACE FUIDs, if any.
*/
if (fuidp) {
fuidsz += fuidp->z_domain_str_sz;
fuidsz += fuidp->z_fuid_cnt * sizeof (uint64_t);
}
if (vap->va_mask & ATTR_XVATTR)
xvatsize = ZIL_XVAT_SIZE(xvap->xva_mapsize);
if ((int)txtype == TX_CREATE_ATTR || (int)txtype == TX_MKDIR_ATTR ||
(int)txtype == TX_CREATE || (int)txtype == TX_MKDIR ||
(int)txtype == TX_MKXATTR) {
txsize = sizeof (*lr) + namesize + fuidsz + xvatsize;
lrsize = sizeof (*lr);
} else {
txsize =
sizeof (lr_acl_create_t) + namesize + fuidsz +
ZIL_ACE_LENGTH(aclsize) + xvatsize;
lrsize = sizeof (lr_acl_create_t);
}
itx = zil_itx_create(txtype, txsize);
lr = (lr_create_t *)&itx->itx_lr;
lr->lr_doid = dzp->z_id;
lr->lr_foid = zp->z_id;
/* Store dnode slot count in 8 bits above object id. */
LR_FOID_SET_SLOTS(lr->lr_foid, zp->z_dnodesize >> DNODE_SHIFT);
lr->lr_mode = zp->z_mode;
if (!IS_EPHEMERAL(KUID_TO_SUID(ZTOUID(zp)))) {
lr->lr_uid = (uint64_t)KUID_TO_SUID(ZTOUID(zp));
} else {
lr->lr_uid = fuidp->z_fuid_owner;
}
if (!IS_EPHEMERAL(KGID_TO_SGID(ZTOGID(zp)))) {
lr->lr_gid = (uint64_t)KGID_TO_SGID(ZTOGID(zp));
} else {
lr->lr_gid = fuidp->z_fuid_group;
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
sizeof (uint64_t));
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
lr->lr_crtime, sizeof (uint64_t) * 2);
if (sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(ZTOZSB(zp)), &lr->lr_rdev,
sizeof (lr->lr_rdev)) != 0)
lr->lr_rdev = 0;
/*
* Fill in xvattr info if any
*/
if (vap->va_mask & ATTR_XVATTR) {
zfs_log_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), xvap);
end = (caddr_t)lr + lrsize + xvatsize;
} else {
end = (caddr_t)lr + lrsize;
}
/* Now fill in any ACL info */
if (vsecp) {
lracl = (lr_acl_create_t *)&itx->itx_lr;
lracl->lr_aclcnt = vsecp->vsa_aclcnt;
lracl->lr_acl_bytes = aclsize;
lracl->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
lracl->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
if (vsecp->vsa_aclflags & VSA_ACE_ACLFLAGS)
lracl->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
else
lracl->lr_acl_flags = 0;
bcopy(vsecp->vsa_aclentp, end, aclsize);
end = (caddr_t)end + ZIL_ACE_LENGTH(aclsize);
}
/* drop in FUID info */
if (fuidp) {
end = zfs_log_fuid_ids(fuidp, end);
end = zfs_log_fuid_domains(fuidp, end);
}
/*
* Now place file name in log record
*/
bcopy(name, end, namesize);
zil_itx_assign(zilog, itx, tx);
}
/*
* Handles both TX_REMOVE and TX_RMDIR transactions.
*/
void
zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, const char *name, uint64_t foid, boolean_t unlinked)
{
itx_t *itx;
lr_remove_t *lr;
size_t namesize = strlen(name) + 1;
if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
return;
itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
lr = (lr_remove_t *)&itx->itx_lr;
lr->lr_doid = dzp->z_id;
bcopy(name, (char *)(lr + 1), namesize);
itx->itx_oid = foid;
/*
* Object ids can be re-instantiated in the next txg so
* remove any async transactions to avoid future leaks.
* This can happen if an fsync occurs on the re-instantiated
* object for a WR_INDIRECT or WR_NEED_COPY write, which gets
* the new file data and flushes a write record for the old object.
*/
if (unlinked) {
ASSERT((txtype & ~TX_CI) == TX_REMOVE);
zil_remove_async(zilog, foid);
}
zil_itx_assign(zilog, itx, tx);
}
/*
* Handles TX_LINK transactions.
*/
void
zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, znode_t *zp, const char *name)
{
itx_t *itx;
lr_link_t *lr;
size_t namesize = strlen(name) + 1;
if (zil_replaying(zilog, tx))
return;
itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
lr = (lr_link_t *)&itx->itx_lr;
lr->lr_doid = dzp->z_id;
lr->lr_link_obj = zp->z_id;
bcopy(name, (char *)(lr + 1), namesize);
zil_itx_assign(zilog, itx, tx);
}
/*
* Handles TX_SYMLINK transactions.
*/
void
zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, znode_t *zp, const char *name, const char *link)
{
itx_t *itx;
lr_create_t *lr;
size_t namesize = strlen(name) + 1;
size_t linksize = strlen(link) + 1;
if (zil_replaying(zilog, tx))
return;
itx = zil_itx_create(txtype, sizeof (*lr) + namesize + linksize);
lr = (lr_create_t *)&itx->itx_lr;
lr->lr_doid = dzp->z_id;
lr->lr_foid = zp->z_id;
lr->lr_uid = KUID_TO_SUID(ZTOUID(zp));
lr->lr_gid = KGID_TO_SGID(ZTOGID(zp));
lr->lr_mode = zp->z_mode;
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
sizeof (uint64_t));
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
lr->lr_crtime, sizeof (uint64_t) * 2);
bcopy(name, (char *)(lr + 1), namesize);
bcopy(link, (char *)(lr + 1) + namesize, linksize);
zil_itx_assign(zilog, itx, tx);
}
/*
* Handles TX_RENAME transactions.
*/
void
zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *sdzp,
const char *sname, znode_t *tdzp, const char *dname, znode_t *szp)
{
itx_t *itx;
lr_rename_t *lr;
size_t snamesize = strlen(sname) + 1;
size_t dnamesize = strlen(dname) + 1;
if (zil_replaying(zilog, tx))
return;
itx = zil_itx_create(txtype, sizeof (*lr) + snamesize + dnamesize);
lr = (lr_rename_t *)&itx->itx_lr;
lr->lr_sdoid = sdzp->z_id;
lr->lr_tdoid = tdzp->z_id;
bcopy(sname, (char *)(lr + 1), snamesize);
bcopy(dname, (char *)(lr + 1) + snamesize, dnamesize);
itx->itx_oid = szp->z_id;
zil_itx_assign(zilog, itx, tx);
}
/*
* zfs_log_write() handles TX_WRITE transactions. The specified callback is
* called as soon as the write is on stable storage (be it via a DMU sync or a
* ZIL commit).
*/
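/*
 * When the pool has no separate log device, writes of at least this many
 * bytes are logged as WR_INDIRECT (block pointer only) instead of copying
 * the data into the log record; see the write_state selection below.
 */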
long zfs_immediate_write_sz = 32768;
void
zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, offset_t off, ssize_t resid, int ioflag,
zil_callback_t callback, void *callback_data)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
uint32_t blocksize = zp->z_blksz;
itx_wr_state_t write_state;
uintptr_t fsync_cnt;
uint64_t gen = 0;
ssize_t size = resid;
if (zil_replaying(zilog, tx) || zp->z_unlinked ||
zfs_xattr_owner_unlinked(zp)) {
if (callback != NULL)
callback(callback_data);
return;
}
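/*
 * Select how the data is captured in the log record:
 *  WR_INDIRECT  - log only a block pointer (logbias=throughput, or large
 *                 writes when the pool has no separate log device)
 *  WR_COPIED    - copy the data into the record now (small sync writes)
 *  WR_NEED_COPY - copy the data later, when the itx is committed to the ZIL
 */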
if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
write_state = WR_INDIRECT;
else if (!spa_has_slogs(zilog->zl_spa) &&
resid >= zfs_immediate_write_sz)
write_state = WR_INDIRECT;
else if (ioflag & (O_SYNC | O_DSYNC))
write_state = WR_COPIED;
else
write_state = WR_NEED_COPY;
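/*
 * A non-zero count under zfs_fsyncer_key (set on the fsync path) keeps
 * the itxs created below synchronous (itx_sync stays B_TRUE).
 */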
if ((fsync_cnt = (uintptr_t)tsd_get(zfs_fsyncer_key)) != 0) {
(void) tsd_set(zfs_fsyncer_key, (void *)(fsync_cnt - 1));
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &gen,
sizeof (gen));
while (resid) {
itx_t *itx;
lr_write_t *lr;
itx_wr_state_t wr_state = write_state;
ssize_t len = resid;
/*
* A WR_COPIED record must fit entirely in one log block.
* Large writes can use WR_NEED_COPY, which the ZIL will
* split into multiple records across several log blocks
* if necessary.
*/
if (wr_state == WR_COPIED &&
resid > zil_max_copied_data(zilog))
wr_state = WR_NEED_COPY;
else if (wr_state == WR_INDIRECT)
len = MIN(blocksize - P2PHASE(off, blocksize), resid);
itx = zil_itx_create(txtype, sizeof (*lr) +
(wr_state == WR_COPIED ? len : 0));
lr = (lr_write_t *)&itx->itx_lr;
/*
* For WR_COPIED records, copy the data into the lr_write_t.
*/
if (wr_state == WR_COPIED) {
int err;
DB_DNODE_ENTER(db);
err = dmu_read_by_dnode(DB_DNODE(db), off, len, lr + 1,
DMU_READ_NO_PREFETCH);
if (err != 0) {
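/* The read failed; fall back to WR_NEED_COPY for this record. */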
zil_itx_destroy(itx);
itx = zil_itx_create(txtype, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
wr_state = WR_NEED_COPY;
}
DB_DNODE_EXIT(db);
}
itx->itx_wr_state = wr_state;
lr->lr_foid = zp->z_id;
lr->lr_offset = off;
lr->lr_length = len;
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
itx->itx_private = ZTOZSB(zp);
itx->itx_gen = gen;
if (!(ioflag & (O_SYNC | O_DSYNC)) && (zp->z_sync_cnt == 0) &&
(fsync_cnt == 0))
itx->itx_sync = B_FALSE;
itx->itx_callback = callback;
itx->itx_callback_data = callback_data;
zil_itx_assign(zilog, itx, tx);
off += len;
resid -= len;
}
if (write_state == WR_COPIED || write_state == WR_NEED_COPY) {
dsl_pool_wrlog_count(zilog->zl_dmu_pool, size, tx->tx_txg);
}
}
/*
* Handles TX_TRUNCATE transactions.
*/
void
zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, uint64_t off, uint64_t len)
{
itx_t *itx;
lr_truncate_t *lr;
if (zil_replaying(zilog, tx) || zp->z_unlinked ||
zfs_xattr_owner_unlinked(zp))
return;
itx = zil_itx_create(txtype, sizeof (*lr));
lr = (lr_truncate_t *)&itx->itx_lr;
lr->lr_foid = zp->z_id;
lr->lr_offset = off;
lr->lr_length = len;
itx->itx_sync = (zp->z_sync_cnt != 0);
zil_itx_assign(zilog, itx, tx);
}
/*
* Handles TX_SETATTR transactions.
*/
void
zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp)
{
itx_t *itx;
lr_setattr_t *lr;
xvattr_t *xvap = (xvattr_t *)vap;
size_t recsize = sizeof (lr_setattr_t);
void *start;
if (zil_replaying(zilog, tx) || zp->z_unlinked)
return;
/*
* If XVATTR is set, then the log record size needs to allow
* for lr_attr_t + xvattr mask, mapsize and create time
* plus actual attribute values
*/
if (vap->va_mask & ATTR_XVATTR)
recsize = sizeof (*lr) + ZIL_XVAT_SIZE(xvap->xva_mapsize);
if (fuidp)
recsize += fuidp->z_domain_str_sz;
itx = zil_itx_create(txtype, recsize);
lr = (lr_setattr_t *)&itx->itx_lr;
lr->lr_foid = zp->z_id;
lr->lr_mask = (uint64_t)mask_applied;
lr->lr_mode = (uint64_t)vap->va_mode;
if ((mask_applied & ATTR_UID) && IS_EPHEMERAL(vap->va_uid))
lr->lr_uid = fuidp->z_fuid_owner;
else
lr->lr_uid = (uint64_t)vap->va_uid;
if ((mask_applied & ATTR_GID) && IS_EPHEMERAL(vap->va_gid))
lr->lr_gid = fuidp->z_fuid_group;
else
lr->lr_gid = (uint64_t)vap->va_gid;
lr->lr_size = (uint64_t)vap->va_size;
ZFS_TIME_ENCODE(&vap->va_atime, lr->lr_atime);
ZFS_TIME_ENCODE(&vap->va_mtime, lr->lr_mtime);
start = (lr_setattr_t *)(lr + 1);
if (vap->va_mask & ATTR_XVATTR) {
zfs_log_xvattr((lr_attr_t *)start, xvap);
start = (caddr_t)start + ZIL_XVAT_SIZE(xvap->xva_mapsize);
}
/*
* Now stick on domain information if any on end
*/
if (fuidp)
(void) zfs_log_fuid_domains(fuidp, start);
itx->itx_sync = (zp->z_sync_cnt != 0);
zil_itx_assign(zilog, itx, tx);
}
/*
* Handles TX_ACL transactions.
*/
void
zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
vsecattr_t *vsecp, zfs_fuid_info_t *fuidp)
{
itx_t *itx;
lr_acl_v0_t *lrv0;
lr_acl_t *lr;
int txtype;
int lrsize;
size_t txsize;
size_t aclbytes = vsecp->vsa_aclentsz;
if (zil_replaying(zilog, tx) || zp->z_unlinked)
return;
txtype = (ZTOZSB(zp)->z_version < ZPL_VERSION_FUID) ?
TX_ACL_V0 : TX_ACL;
if (txtype == TX_ACL)
lrsize = sizeof (*lr);
else
lrsize = sizeof (*lrv0);
txsize = lrsize +
((txtype == TX_ACL) ? ZIL_ACE_LENGTH(aclbytes) : aclbytes) +
(fuidp ? fuidp->z_domain_str_sz : 0) +
sizeof (uint64_t) * (fuidp ? fuidp->z_fuid_cnt : 0);
itx = zil_itx_create(txtype, txsize);
lr = (lr_acl_t *)&itx->itx_lr;
lr->lr_foid = zp->z_id;
if (txtype == TX_ACL) {
lr->lr_acl_bytes = aclbytes;
lr->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
lr->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS)
lr->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
else
lr->lr_acl_flags = 0;
}
lr->lr_aclcnt = (uint64_t)vsecp->vsa_aclcnt;
if (txtype == TX_ACL_V0) {
lrv0 = (lr_acl_v0_t *)lr;
bcopy(vsecp->vsa_aclentp, (ace_t *)(lrv0 + 1), aclbytes);
} else {
void *start = (ace_t *)(lr + 1);
bcopy(vsecp->vsa_aclentp, start, aclbytes);
start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);
if (fuidp) {
start = zfs_log_fuid_ids(fuidp, start);
(void) zfs_log_fuid_domains(fuidp, start);
}
}
itx->itx_sync = (zp->z_sync_cnt != 0);
zil_itx_assign(zilog, itx, tx);
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, immediate_write_sz, LONG, ZMOD_RW,
"Largest data block to write to zil");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/zfs_replay.c b/sys/contrib/openzfs/module/zfs/zfs_replay.c
index cba5e8c9cd0b..9073888dbab6 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_replay.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_replay.c
@@ -1,992 +1,992 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 Cyril Plisko. All rights reserved.
* Copyright (c) 2013, 2017 by Delphix. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/vfs.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/spa.h>
#include <sys/zil.h>
#include <sys/byteorder.h>
#include <sys/stat.h>
#include <sys/acl.h>
#include <sys/atomic.h>
#include <sys/cred.h>
#include <sys/zpl.h>
/*
* NB: FreeBSD expects to be able to do vnode locking in lookup and
* hold the locks across all subsequent VOPs until vput is called.
* This means that its zfs vnops routines can't do any internal locking.
* In order to have the same contract as the Linux vnops there would
* need to be duplicate locked vnops. If the vnops were used more widely
* in common code this would likely be preferable. However, currently
* this is the only file where this is the case.
*/
/*
* Functions to replay ZFS intent log (ZIL) records
* The functions are called through a function vector (zfs_replay_vector)
* which is indexed by the transaction type.
*/
static void
zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode,
uint64_t uid, uint64_t gid, uint64_t rdev, uint64_t nodeid)
{
bzero(vap, sizeof (*vap));
vap->va_mask = (uint_t)mask;
vap->va_mode = mode;
-#ifdef __FreeBSD__
+#if defined(__FreeBSD__) || defined(__APPLE__)
vap->va_type = IFTOVT(mode);
#endif
vap->va_uid = (uid_t)(IS_EPHEMERAL(uid)) ? -1 : uid;
vap->va_gid = (gid_t)(IS_EPHEMERAL(gid)) ? -1 : gid;
vap->va_rdev = zfs_cmpldev(rdev);
vap->va_nodeid = nodeid;
}
/* ARGSUSED */
static int
zfs_replay_error(void *arg1, void *arg2, boolean_t byteswap)
{
return (SET_ERROR(ENOTSUP));
}
static void
zfs_replay_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
{
xoptattr_t *xoap = NULL;
uint64_t *attrs;
uint64_t *crtime;
uint32_t *bitmap;
void *scanstamp;
int i;
xvap->xva_vattr.va_mask |= ATTR_XVATTR;
if ((xoap = xva_getxoptattr(xvap)) == NULL) {
xvap->xva_vattr.va_mask &= ~ATTR_XVATTR; /* shouldn't happen */
return;
}
ASSERT(lrattr->lr_attr_masksize == xvap->xva_mapsize);
bitmap = &lrattr->lr_attr_bitmap;
for (i = 0; i != lrattr->lr_attr_masksize; i++, bitmap++)
xvap->xva_reqattrmap[i] = *bitmap;
attrs = (uint64_t *)(lrattr + lrattr->lr_attr_masksize - 1);
crtime = attrs + 1;
scanstamp = (caddr_t)(crtime + 2);
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
xoap->xoa_hidden = ((*attrs & XAT0_HIDDEN) != 0);
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
xoap->xoa_system = ((*attrs & XAT0_SYSTEM) != 0);
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
xoap->xoa_archive = ((*attrs & XAT0_ARCHIVE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_READONLY))
xoap->xoa_readonly = ((*attrs & XAT0_READONLY) != 0);
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
xoap->xoa_immutable = ((*attrs & XAT0_IMMUTABLE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
xoap->xoa_nounlink = ((*attrs & XAT0_NOUNLINK) != 0);
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
xoap->xoa_appendonly = ((*attrs & XAT0_APPENDONLY) != 0);
if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
xoap->xoa_nodump = ((*attrs & XAT0_NODUMP) != 0);
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
xoap->xoa_opaque = ((*attrs & XAT0_OPAQUE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
xoap->xoa_av_modified = ((*attrs & XAT0_AV_MODIFIED) != 0);
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
xoap->xoa_av_quarantined =
((*attrs & XAT0_AV_QUARANTINED) != 0);
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
ZFS_TIME_DECODE(&xoap->xoa_createtime, crtime);
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
bcopy(scanstamp, xoap->xoa_av_scanstamp, AV_SCANSTAMP_SZ);
} else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
/*
* XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
* at the same time, so we can share the same space.
*/
bcopy(scanstamp, &xoap->xoa_projid, sizeof (uint64_t));
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
xoap->xoa_reparse = ((*attrs & XAT0_REPARSE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
xoap->xoa_offline = ((*attrs & XAT0_OFFLINE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
xoap->xoa_sparse = ((*attrs & XAT0_SPARSE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
xoap->xoa_projinherit = ((*attrs & XAT0_PROJINHERIT) != 0);
}
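/*
 * Count the FUID domain strings that follow the record: one if the uid
 * carries a domain index, plus one more if the gid's index differs.
 */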
static int
zfs_replay_domain_cnt(uint64_t uid, uint64_t gid)
{
uint64_t uid_idx;
uint64_t gid_idx;
int domcnt = 0;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
if (uid_idx)
domcnt++;
if (gid_idx > 0 && gid_idx != uid_idx)
domcnt++;
return (domcnt);
}
static void *
zfs_replay_fuid_domain_common(zfs_fuid_info_t *fuid_infop, void *start,
int domcnt)
{
int i;
for (i = 0; i != domcnt; i++) {
fuid_infop->z_domain_table[i] = start;
start = (caddr_t)start + strlen(start) + 1;
}
return (start);
}
/*
* Set the uid/gid in the fuid_info structure.
*/
static void
zfs_replay_fuid_ugid(zfs_fuid_info_t *fuid_infop, uint64_t uid, uint64_t gid)
{
/*
* If owner or group are log specific FUIDs then slurp up
* domain information and build zfs_fuid_info_t
*/
if (IS_EPHEMERAL(uid))
fuid_infop->z_fuid_owner = uid;
if (IS_EPHEMERAL(gid))
fuid_infop->z_fuid_group = gid;
}
/*
* Load fuid domains into fuid_info_t
*/
static zfs_fuid_info_t *
zfs_replay_fuid_domain(void *buf, void **end, uint64_t uid, uint64_t gid)
{
int domcnt;
zfs_fuid_info_t *fuid_infop;
fuid_infop = zfs_fuid_info_alloc();
domcnt = zfs_replay_domain_cnt(uid, gid);
if (domcnt == 0)
return (fuid_infop);
fuid_infop->z_domain_table =
kmem_zalloc(domcnt * sizeof (char *), KM_SLEEP);
zfs_replay_fuid_ugid(fuid_infop, uid, gid);
fuid_infop->z_domain_cnt = domcnt;
*end = zfs_replay_fuid_domain_common(fuid_infop, buf, domcnt);
return (fuid_infop);
}
/*
* load zfs_fuid_t's and fuid_domains into fuid_info_t
*/
static zfs_fuid_info_t *
zfs_replay_fuids(void *start, void **end, int idcnt, int domcnt, uint64_t uid,
uint64_t gid)
{
uint64_t *log_fuid = (uint64_t *)start;
zfs_fuid_info_t *fuid_infop;
int i;
fuid_infop = zfs_fuid_info_alloc();
fuid_infop->z_domain_cnt = domcnt;
fuid_infop->z_domain_table =
kmem_zalloc(domcnt * sizeof (char *), KM_SLEEP);
for (i = 0; i != idcnt; i++) {
zfs_fuid_t *zfuid;
zfuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
zfuid->z_logfuid = *log_fuid;
zfuid->z_id = -1;
zfuid->z_domidx = 0;
list_insert_tail(&fuid_infop->z_fuids, zfuid);
log_fuid++;
}
zfs_replay_fuid_ugid(fuid_infop, uid, gid);
*end = zfs_replay_fuid_domain_common(fuid_infop, log_fuid, domcnt);
return (fuid_infop);
}
static void
zfs_replay_swap_attrs(lr_attr_t *lrattr)
{
/* swap the lr_attr structure */
byteswap_uint32_array(lrattr, sizeof (*lrattr));
/* swap the bitmap */
byteswap_uint32_array(lrattr + 1, (lrattr->lr_attr_masksize - 1) *
sizeof (uint32_t));
/* swap the attribute word and the two create-time words (3 x 64 bits) */
byteswap_uint64_array((caddr_t)(lrattr + 1) + (sizeof (uint32_t) *
(lrattr->lr_attr_masksize - 1)), 3 * sizeof (uint64_t));
}
/*
* Replay a file create with optional ACL and xvattr information, as well
* as optional FUID information.
*/
static int
zfs_replay_create_acl(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_acl_create_t *lracl = arg2;
char *name = NULL; /* location determined later */
lr_create_t *lr = (lr_create_t *)lracl;
znode_t *dzp;
znode_t *zp;
xvattr_t xva;
int vflg = 0;
vsecattr_t vsec = { 0 };
lr_attr_t *lrattr;
void *aclstart;
void *fuidstart;
size_t xvatlen = 0;
uint64_t txtype;
uint64_t objid;
uint64_t dnodesize;
int error;
txtype = (lr->lr_common.lrc_txtype & ~TX_CI);
if (byteswap) {
byteswap_uint64_array(lracl, sizeof (*lracl));
if (txtype == TX_CREATE_ACL_ATTR ||
txtype == TX_MKDIR_ACL_ATTR) {
lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
zfs_replay_swap_attrs(lrattr);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
}
aclstart = (caddr_t)(lracl + 1) + xvatlen;
zfs_ace_byteswap(aclstart, lracl->lr_acl_bytes, B_FALSE);
/* swap fuids */
if (lracl->lr_fuidcnt) {
byteswap_uint64_array((caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes),
lracl->lr_fuidcnt * sizeof (uint64_t));
}
}
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
objid = LR_FOID_GET_OBJ(lr->lr_foid);
dnodesize = LR_FOID_GET_SLOTS(lr->lr_foid) << DNODE_SHIFT;
xva_init(&xva);
zfs_init_vattr(&xva.xva_vattr, ATTR_MODE | ATTR_UID | ATTR_GID,
lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, objid);
/*
* All forms of zfs create (create, mkdir, mkxattrdir, symlink)
* eventually end up in zfs_mknode(), which assigns the object's
* creation time, generation number, and dnode size. The generic
* zfs_create() has no concept of these attributes, so we smuggle
* the values inside the vattr's otherwise unused va_ctime,
* va_nblocks, and va_fsid fields.
*/
ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime);
xva.xva_vattr.va_nblocks = lr->lr_gen;
xva.xva_vattr.va_fsid = dnodesize;
error = dnode_try_claim(zfsvfs->z_os, objid, dnodesize >> DNODE_SHIFT);
if (error)
goto bail;
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
switch (txtype) {
case TX_CREATE_ACL:
aclstart = (caddr_t)(lracl + 1);
fuidstart = (caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
- /*FALLTHROUGH*/
+ /* FALLTHROUGH */
case TX_CREATE_ACL_ATTR:
if (name == NULL) {
lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
xva.xva_vattr.va_mask |= ATTR_XVATTR;
zfs_replay_xvattr(lrattr, &xva);
}
vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen;
vsec.vsa_aclcnt = lracl->lr_aclcnt;
vsec.vsa_aclentsz = lracl->lr_acl_bytes;
vsec.vsa_aclflags = lracl->lr_acl_flags;
if (zfsvfs->z_fuid_replay == NULL) {
fuidstart = (caddr_t)(lracl + 1) + xvatlen +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
}
error = zfs_create(dzp, name, &xva.xva_vattr,
0, 0, &zp, kcred, vflg, &vsec);
break;
case TX_MKDIR_ACL:
aclstart = (caddr_t)(lracl + 1);
fuidstart = (caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
- /*FALLTHROUGH*/
+ /* FALLTHROUGH */
case TX_MKDIR_ACL_ATTR:
if (name == NULL) {
lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr(lrattr, &xva);
}
vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen;
vsec.vsa_aclcnt = lracl->lr_aclcnt;
vsec.vsa_aclentsz = lracl->lr_acl_bytes;
vsec.vsa_aclflags = lracl->lr_acl_flags;
if (zfsvfs->z_fuid_replay == NULL) {
fuidstart = (caddr_t)(lracl + 1) + xvatlen +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
}
error = zfs_mkdir(dzp, name, &xva.xva_vattr,
&zp, kcred, vflg, &vsec);
break;
default:
error = SET_ERROR(ENOTSUP);
}
bail:
if (error == 0 && zp != NULL) {
#ifdef __FreeBSD__
VOP_UNLOCK1(ZTOV(zp));
#endif
zrele(zp);
}
zrele(dzp);
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
return (error);
}
static int
zfs_replay_create(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_create_t *lr = arg2;
char *name = NULL; /* location determined later */
char *link; /* symlink content follows name */
znode_t *dzp;
znode_t *zp = NULL;
xvattr_t xva;
int vflg = 0;
size_t lrsize = sizeof (lr_create_t);
lr_attr_t *lrattr;
void *start;
size_t xvatlen;
uint64_t txtype;
uint64_t objid;
uint64_t dnodesize;
int error;
txtype = (lr->lr_common.lrc_txtype & ~TX_CI);
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
if (txtype == TX_CREATE_ATTR || txtype == TX_MKDIR_ATTR)
zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
}
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
objid = LR_FOID_GET_OBJ(lr->lr_foid);
dnodesize = LR_FOID_GET_SLOTS(lr->lr_foid) << DNODE_SHIFT;
xva_init(&xva);
zfs_init_vattr(&xva.xva_vattr, ATTR_MODE | ATTR_UID | ATTR_GID,
lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, objid);
/*
* All forms of zfs create (create, mkdir, mkxattrdir, symlink)
* eventually end up in zfs_mknode(), which assigns the object's
* creation time, generation number, and dnode slot count. The
* generic zfs_create() has no concept of these attributes, so
* we smuggle the values inside the vattr's otherwise unused
* va_ctime, va_nblocks, and va_fsid fields.
*/
ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime);
xva.xva_vattr.va_nblocks = lr->lr_gen;
xva.xva_vattr.va_fsid = dnodesize;
error = dnode_try_claim(zfsvfs->z_os, objid, dnodesize >> DNODE_SHIFT);
if (error)
goto out;
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
/*
* Symlinks don't have fuid info, and CIFS never creates
* symlinks.
*
* The _ATTR versions will grab the fuid info in their subcases.
*/
if ((int)lr->lr_common.lrc_txtype != TX_SYMLINK &&
(int)lr->lr_common.lrc_txtype != TX_MKDIR_ATTR &&
(int)lr->lr_common.lrc_txtype != TX_CREATE_ATTR) {
start = (lr + 1);
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
}
switch (txtype) {
case TX_CREATE_ATTR:
lrattr = (lr_attr_t *)(caddr_t)(lr + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
start = (caddr_t)(lr + 1) + xvatlen;
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
name = (char *)start;
- /*FALLTHROUGH*/
+ /* FALLTHROUGH */
case TX_CREATE:
if (name == NULL)
name = (char *)start;
error = zfs_create(dzp, name, &xva.xva_vattr,
0, 0, &zp, kcred, vflg, NULL);
break;
case TX_MKDIR_ATTR:
lrattr = (lr_attr_t *)(caddr_t)(lr + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
start = (caddr_t)(lr + 1) + xvatlen;
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
name = (char *)start;
- /*FALLTHROUGH*/
+ /* FALLTHROUGH */
case TX_MKDIR:
if (name == NULL)
name = (char *)(lr + 1);
error = zfs_mkdir(dzp, name, &xva.xva_vattr,
&zp, kcred, vflg, NULL);
break;
case TX_MKXATTR:
error = zfs_make_xattrdir(dzp, &xva.xva_vattr, &zp, kcred);
break;
case TX_SYMLINK:
name = (char *)(lr + 1);
link = name + strlen(name) + 1;
error = zfs_symlink(dzp, name, &xva.xva_vattr,
link, &zp, kcred, vflg);
break;
default:
error = SET_ERROR(ENOTSUP);
}
out:
if (error == 0 && zp != NULL) {
#ifdef __FreeBSD__
VOP_UNLOCK1(ZTOV(zp));
#endif
zrele(zp);
}
zrele(dzp);
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
return (error);
}
static int
zfs_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_remove_t *lr = arg2;
char *name = (char *)(lr + 1); /* name follows lr_remove_t */
znode_t *dzp;
int error;
int vflg = 0;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
switch ((int)lr->lr_common.lrc_txtype) {
case TX_REMOVE:
error = zfs_remove(dzp, name, kcred, vflg);
break;
case TX_RMDIR:
error = zfs_rmdir(dzp, name, NULL, kcred, vflg);
break;
default:
error = SET_ERROR(ENOTSUP);
}
zrele(dzp);
return (error);
}
static int
zfs_replay_link(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_link_t *lr = arg2;
char *name = (char *)(lr + 1); /* name follows lr_link_t */
znode_t *dzp, *zp;
int error;
int vflg = 0;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
if ((error = zfs_zget(zfsvfs, lr->lr_link_obj, &zp)) != 0) {
zrele(dzp);
return (error);
}
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
error = zfs_link(dzp, zp, name, kcred, vflg);
zrele(zp);
zrele(dzp);
return (error);
}
static int
zfs_replay_rename(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_rename_t *lr = arg2;
char *sname = (char *)(lr + 1); /* sname and tname follow lr_rename_t */
char *tname = sname + strlen(sname) + 1;
znode_t *sdzp, *tdzp;
int error;
int vflg = 0;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_sdoid, &sdzp)) != 0)
return (error);
if ((error = zfs_zget(zfsvfs, lr->lr_tdoid, &tdzp)) != 0) {
zrele(sdzp);
return (error);
}
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
error = zfs_rename(sdzp, sname, tdzp, tname, kcred, vflg);
zrele(tdzp);
zrele(sdzp);
return (error);
}
static int
zfs_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_write_t *lr = arg2;
char *data = (char *)(lr + 1); /* data follows lr_write_t */
znode_t *zp;
int error;
uint64_t eod, offset, length;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) {
/*
* As we can log writes out of order, it's possible the
* file has been removed. In this case just drop the write
* and return success.
*/
if (error == ENOENT)
error = 0;
return (error);
}
offset = lr->lr_offset;
length = lr->lr_length;
eod = offset + length; /* end of data for this write */
/*
* This may be a write from a dmu_sync() for a whole block,
* and may extend beyond the current end of the file.
* We can't just replay what was written for this TX_WRITE as
* a future TX_WRITE2 may extend the eof and the data for that
* write needs to be there. So we write the whole block and
* reduce the eof. This needs to be done within the single dmu
* transaction created within vn_rdwr -> zfs_write. So a possible
* new end of file is passed through in zfsvfs->z_replay_eof
*/
zfsvfs->z_replay_eof = 0; /* 0 means don't change end of file */
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
if (zp->z_size < eod)
zfsvfs->z_replay_eof = eod;
}
error = zfs_write_simple(zp, data, length, offset, NULL);
zrele(zp);
zfsvfs->z_replay_eof = 0; /* safety */
return (error);
}
/*
* TX_WRITE2 records are only generated when dmu_sync() returns EALREADY,
* meaning the pool block is already being synced. So now that we always write
* out full blocks, all we have to do is expand the eof if
* the file is grown.
*/
static int
zfs_replay_write2(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_write_t *lr = arg2;
znode_t *zp;
int error;
uint64_t end;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
top:
end = lr->lr_offset + lr->lr_length;
if (end > zp->z_size) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
zp->z_size = end;
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zrele(zp);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
return (error);
}
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
(void *)&zp->z_size, sizeof (uint64_t), tx);
/* Ensure the replayed seq is updated */
(void) zil_replaying(zfsvfs->z_log, tx);
dmu_tx_commit(tx);
}
zrele(zp);
return (error);
}
static int
zfs_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_truncate_t *lr = arg2;
znode_t *zp;
flock64_t fl;
int error;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
bzero(&fl, sizeof (fl));
fl.l_type = F_WRLCK;
fl.l_whence = SEEK_SET;
fl.l_start = lr->lr_offset;
fl.l_len = lr->lr_length;
error = zfs_space(zp, F_FREESP, &fl, O_RDWR | O_LARGEFILE,
lr->lr_offset, kcred);
zrele(zp);
return (error);
}
static int
zfs_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_setattr_t *lr = arg2;
znode_t *zp;
xvattr_t xva;
vattr_t *vap = &xva.xva_vattr;
int error;
void *start;
xva_init(&xva);
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
if ((lr->lr_mask & ATTR_XVATTR) &&
zfsvfs->z_version >= ZPL_VERSION_INITIAL)
zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
}
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
zfs_init_vattr(vap, lr->lr_mask, lr->lr_mode,
lr->lr_uid, lr->lr_gid, 0, lr->lr_foid);
vap->va_size = lr->lr_size;
ZFS_TIME_DECODE(&vap->va_atime, lr->lr_atime);
ZFS_TIME_DECODE(&vap->va_mtime, lr->lr_mtime);
gethrestime(&vap->va_ctime);
vap->va_mask |= ATTR_CTIME;
/*
* Fill in xvattr_t portions if necessary.
*/
start = (lr_setattr_t *)(lr + 1);
if (vap->va_mask & ATTR_XVATTR) {
zfs_replay_xvattr((lr_attr_t *)start, &xva);
start = (caddr_t)start +
ZIL_XVAT_SIZE(((lr_attr_t *)start)->lr_attr_masksize);
} else
xva.xva_vattr.va_mask &= ~ATTR_XVATTR;
zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
error = zfs_setattr(zp, vap, 0, kcred);
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
zrele(zp);
return (error);
}
static int
zfs_replay_acl_v0(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_acl_v0_t *lr = arg2;
ace_t *ace = (ace_t *)(lr + 1); /* ace array follows lr_acl_t */
vsecattr_t vsa;
znode_t *zp;
int error;
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
zfs_oldace_byteswap(ace, lr->lr_aclcnt);
}
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
bzero(&vsa, sizeof (vsa));
vsa.vsa_mask = VSA_ACE | VSA_ACECNT;
vsa.vsa_aclcnt = lr->lr_aclcnt;
vsa.vsa_aclentsz = sizeof (ace_t) * vsa.vsa_aclcnt;
vsa.vsa_aclflags = 0;
vsa.vsa_aclentp = ace;
error = zfs_setsecattr(zp, &vsa, 0, kcred);
zrele(zp);
return (error);
}
/*
* Replaying ACLs is complicated by FUID support.
* The log record may contain some optional data
* to be used for replaying FUIDs. These pieces
* are the actual FUIDs that were created initially.
* The FUID table index may no longer be valid, and
* during zfs_create() a new index may be assigned.
* Because of this, the log will contain the original
* domain+rid in order to create a new FUID.
*
* The individual ACEs may contain an ephemeral uid/gid which is no
* longer valid and will need to be replaced with an actual FUID.
*
*/
static int
zfs_replay_acl(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_acl_t *lr = arg2;
ace_t *ace = (ace_t *)(lr + 1);
vsecattr_t vsa;
znode_t *zp;
int error;
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
zfs_ace_byteswap(ace, lr->lr_acl_bytes, B_FALSE);
if (lr->lr_fuidcnt) {
byteswap_uint64_array((caddr_t)ace +
ZIL_ACE_LENGTH(lr->lr_acl_bytes),
lr->lr_fuidcnt * sizeof (uint64_t));
}
}
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
bzero(&vsa, sizeof (vsa));
vsa.vsa_mask = VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS;
vsa.vsa_aclcnt = lr->lr_aclcnt;
vsa.vsa_aclentp = ace;
vsa.vsa_aclentsz = lr->lr_acl_bytes;
vsa.vsa_aclflags = lr->lr_acl_flags;
if (lr->lr_fuidcnt) {
void *fuidstart = (caddr_t)ace +
ZIL_ACE_LENGTH(lr->lr_acl_bytes);
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart, &fuidstart,
lr->lr_fuidcnt, lr->lr_domcnt, 0, 0);
}
error = zfs_setsecattr(zp, &vsa, 0, kcred);
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
zrele(zp);
return (error);
}
/*
* Callback vectors for replaying records
*/
zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE] = {
zfs_replay_error, /* no such type */
zfs_replay_create, /* TX_CREATE */
zfs_replay_create, /* TX_MKDIR */
zfs_replay_create, /* TX_MKXATTR */
zfs_replay_create, /* TX_SYMLINK */
zfs_replay_remove, /* TX_REMOVE */
zfs_replay_remove, /* TX_RMDIR */
zfs_replay_link, /* TX_LINK */
zfs_replay_rename, /* TX_RENAME */
zfs_replay_write, /* TX_WRITE */
zfs_replay_truncate, /* TX_TRUNCATE */
zfs_replay_setattr, /* TX_SETATTR */
zfs_replay_acl_v0, /* TX_ACL_V0 */
zfs_replay_acl, /* TX_ACL */
zfs_replay_create_acl, /* TX_CREATE_ACL */
zfs_replay_create, /* TX_CREATE_ATTR */
zfs_replay_create_acl, /* TX_CREATE_ACL_ATTR */
zfs_replay_create_acl, /* TX_MKDIR_ACL */
zfs_replay_create, /* TX_MKDIR_ATTR */
zfs_replay_create_acl, /* TX_MKDIR_ACL_ATTR */
zfs_replay_write2, /* TX_WRITE2 */
};
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index d8d39f861c75..2eeb4fa4fe42 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -1,3700 +1,3705 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright (c) 2018 Datto Inc.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
/*
* The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
* calls that change the file system. Each itx has enough information to
* be able to replay it after a system crash, power loss, or
* equivalent failure mode. These are stored in memory until either:
*
* 1. they are committed to the pool by the DMU transaction group
* (txg), at which point they can be discarded; or
* 2. they are committed to the on-disk ZIL for the dataset being
* modified (e.g. due to an fsync, O_DSYNC, or other synchronous
* requirement).
*
* In the event of a crash or power loss, the itxs contained by each
* dataset's on-disk ZIL will be replayed when that dataset is first
* instantiated (e.g. if the dataset is a normal filesystem, when it is
* first mounted).
*
* As hinted at above, there is one ZIL per dataset (both the in-memory
* representation, and the on-disk representation). The on-disk format
* consists of 3 parts:
*
* - a single, per-dataset, ZIL header; which points to a chain of
* - zero or more ZIL blocks; each of which contains
* - zero or more ZIL records
*
* A ZIL record holds the information necessary to replay a single
* system call transaction. A ZIL block can hold many ZIL records, and
* the blocks are chained together, similarly to a singly linked list.
*
* Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
* block in the chain, and the ZIL header points to the first block in
* the chain.
*
* Note, there is not a fixed place in the pool to hold these ZIL
* blocks; they are dynamically allocated and freed as needed from the
* blocks available on the pool, though they can be preferentially
* allocated from a dedicated "log" vdev.
*/
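/*
 * Illustrative sketch (not part of OpenZFS): a stripped-down, user-space
 * model of the on-disk layout described above -- a per-dataset header
 * pointing at a singly linked chain of blocks, each holding records plus
 * a link to the next block. All toy_* names are hypothetical; real ZIL
 * blocks use blkptr_t and embedded checksums rather than raw pointers.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct toy_block {
	struct toy_block *tb_next;	/* next block in the chain */
	int tb_nrecords;		/* records stored in this block */
} toy_block_t;

typedef struct toy_header {
	toy_block_t *th_log;		/* first block, or NULL if empty */
} toy_header_t;

/* Walk the chain the way replay would: block by block, record by record. */
static int
toy_walk(const toy_header_t *th)
{
	int total = 0;

	for (const toy_block_t *tb = th->th_log; tb != NULL; tb = tb->tb_next)
		total += tb->tb_nrecords;
	return (total);
}

int
main(void)
{
	toy_block_t b2 = { NULL, 3 };
	toy_block_t b1 = { &b2, 5 };
	toy_header_t th = { &b1 };

	printf("records in chain: %d\n", toy_walk(&th));
	return (0);
}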
/*
* This controls the amount of time that a ZIL block (lwb) will remain
* "open" when it isn't "full", and it has a thread waiting for it to be
* committed to stable storage. Please refer to the zil_commit_waiter()
* function (and the comments within it) for more details.
*/
int zfs_commit_timeout_pct = 5;
/*
* See zil.h for more information about these fields.
*/
zil_stats_t zil_stats = {
{ "zil_commit_count", KSTAT_DATA_UINT64 },
{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
};
static kstat_t *zil_ksp;
/*
* Disable intent logging replay. This global ZIL switch affects all pools.
*/
int zil_replay_disable = 0;
/*
* Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to
* the disk(s) by the ZIL after an LWB write has completed. Setting this
* will cause ZIL corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
int zil_nocacheflush = 0;
/*
* Limit SLOG write size per commit executed with synchronous priority.
* Any writes above that will be executed with lower (asynchronous) priority
* to limit potential SLOG device abuse by single active ZIL writer.
*/
unsigned long zil_slog_bulk = 768 * 1024;
static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;
#define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
static int
zil_bp_compare(const void *x1, const void *x2)
{
const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
if (likely(cmp))
return (cmp);
return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}
static void
zil_bp_tree_init(zilog_t *zilog)
{
avl_create(&zilog->zl_bp_tree, zil_bp_compare,
sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}
static void
zil_bp_tree_fini(zilog_t *zilog)
{
avl_tree_t *t = &zilog->zl_bp_tree;
zil_bp_node_t *zn;
void *cookie = NULL;
while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zn, sizeof (zil_bp_node_t));
avl_destroy(t);
}
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
avl_tree_t *t = &zilog->zl_bp_tree;
const dva_t *dva;
zil_bp_node_t *zn;
avl_index_t where;
if (BP_IS_EMBEDDED(bp))
return (0);
dva = BP_IDENTITY(bp);
if (avl_find(t, dva, &where) != NULL)
return (SET_ERROR(EEXIST));
zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
zn->zn_dva = *dva;
avl_insert(t, zn, where);
return (0);
}
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
return ((zil_header_t *)zilog->zl_header);
}
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
zio_cksum_t *zc = &bp->blk_cksum;
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
/*
* Read a log block and make sure it's valid.
*/
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
blkptr_t *nbp, void *dst, char **end)
{
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
zio_flags |= ZIO_FLAG_SPECULATIVE;
if (!decrypt)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
&abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
zio_cksum_t cksum = bp->blk_cksum;
/*
* Validate the checksummed log block.
*
* Sequence numbers should be... sequential. The checksum
* verifier for the next block should be bp's checksum plus 1.
*
* Also check the log chain linkage and size used.
*/
cksum.zc_word[ZIL_ZC_SEQ]++;
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t *zilc = abuf->b_data;
char *lr = (char *)(zilc + 1);
uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
error = SET_ERROR(ECKSUM);
} else {
ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
bcopy(lr, dst, len);
*end = (char *)dst + len;
*nbp = zilc->zc_next_blk;
}
} else {
char *lr = abuf->b_data;
uint64_t size = BP_GET_LSIZE(bp);
zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
error = SET_ERROR(ECKSUM);
} else {
ASSERT3U(zilc->zc_nused, <=,
SPA_OLD_MAXBLOCKSIZE);
bcopy(lr, dst, zilc->zc_nused);
*end = (char *)dst + zilc->zc_nused;
*nbp = zilc->zc_next_blk;
}
}
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
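/*
 * Illustrative sketch (not part of OpenZFS): the end-of-chain test above
 * stops the walk when a block's embedded pointer to the next block no
 * longer carries the checksum predicted for it (the current block's
 * checksum with the sequence word bumped by one). A toy version of that
 * idea using only sequence numbers, with hypothetical names:
 */
#include <stdio.h>
#include <stdint.h>

#define	TOY_NBLOCKS	4

typedef struct toy_zil_blk {
	uint64_t tzb_seq;	/* this block's sequence number */
	uint64_t tzb_next_seq;	/* expected sequence of the next block */
} toy_zil_blk_t;

/* Count how many blocks form a valid chain starting at blks[0]. */
static int
toy_chain_length(const toy_zil_blk_t *blks, int nblks)
{
	int i;

	for (i = 0; i + 1 < nblks; i++) {
		/* A stale or never-written next block breaks the chain. */
		if (blks[i + 1].tzb_seq != blks[i].tzb_next_seq)
			break;
	}
	return (i + 1);
}

int
main(void)
{
	/* Block 2 was never written: its seq does not match expectation. */
	toy_zil_blk_t blks[TOY_NBLOCKS] = {
		{ 1, 2 }, { 2, 3 }, { 99, 100 }, { 100, 101 }
	};

	printf("valid chain length: %d\n",
	    toy_chain_length(blks, TOY_NBLOCKS));
	return (0);
}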
/*
* Read a TX_WRITE log data block.
*/
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
const blkptr_t *bp = &lr->lr_blkptr;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (BP_IS_HOLE(bp)) {
if (wbuf != NULL)
bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
return (0);
}
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
/*
* If we are not using the resulting data, we are just checking that
* it hasn't been corrupted so we don't need to waste CPU time
* decompressing and decrypting it.
*/
if (wbuf == NULL)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
if (wbuf != NULL)
bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
/*
* Parse the intent log, and call parse_func for each valid record within.
*/
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
boolean_t decrypt)
{
const zil_header_t *zh = zilog->zl_header;
boolean_t claimed = !!zh->zh_claim_txg;
uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
uint64_t max_blk_seq = 0;
uint64_t max_lr_seq = 0;
uint64_t blk_count = 0;
uint64_t lr_count = 0;
blkptr_t blk, next_blk;
char *lrbuf, *lrp;
int error = 0;
bzero(&next_blk, sizeof (blkptr_t));
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
claim_lr_seq = UINT64_MAX;
/*
* Starting at the block pointed to by zh_log we read the log chain.
* For each block in the chain we strongly check that block to
* ensure its validity. We stop when an invalid block is found.
* For each block pointer in the chain we call parse_blk_func().
* For each record in each valid block we call parse_lr_func().
* If the log has been claimed, stop if we encounter a sequence
* number greater than the highest claimed sequence number.
*/
lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
zil_bp_tree_init(zilog);
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
char *end = NULL;
if (blk_seq > claim_blk_seq)
break;
error = parse_blk_func(zilog, &blk, arg, txg);
if (error != 0)
break;
ASSERT3U(max_blk_seq, <, blk_seq);
max_blk_seq = blk_seq;
blk_count++;
if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
break;
error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
lrbuf, &end);
if (error != 0)
break;
for (lrp = lrbuf; lrp < end; lrp += reclen) {
lr_t *lr = (lr_t *)lrp;
reclen = lr->lrc_reclen;
ASSERT3U(reclen, >=, sizeof (lr_t));
if (lr->lrc_seq > claim_lr_seq)
goto done;
error = parse_lr_func(zilog, lr, arg, txg);
if (error != 0)
goto done;
ASSERT3U(max_lr_seq, <, lr->lrc_seq);
max_lr_seq = lr->lrc_seq;
lr_count++;
}
}
done:
zilog->zl_parse_error = error;
zilog->zl_parse_blk_seq = max_blk_seq;
zilog->zl_parse_lr_seq = max_lr_seq;
zilog->zl_parse_blk_count = blk_count;
zilog->zl_parse_lr_count = lr_count;
ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
(max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq) ||
(decrypt && error == EIO));
zil_bp_tree_fini(zilog);
zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);
return (error);
}
/* ARGSUSED */
static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
ASSERT(!BP_IS_HOLE(bp));
/*
* As we call this function from the context of a rewind to a
* checkpoint, each ZIL block whose txg is later than the txg
* that we rewind to is invalid. Thus, we return -1 so
* zil_parse() doesn't attempt to read it.
*/
if (bp->blk_birth >= first_txg)
return (-1);
if (zil_bp_tree_add(zilog, bp) != 0)
return (0);
zio_free(zilog->zl_spa, first_txg, bp);
return (0);
}
/* ARGSUSED */
static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
return (0);
}
static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
/*
* Claim log block if not already committed and not already claimed.
* If tx == NULL, just verify that the block is claimable.
*/
if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
zil_bp_tree_add(zilog, bp) != 0)
return (0);
return (zio_wait(zio_claim(NULL, zilog->zl_spa,
tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
int error;
if (lrc->lrc_txtype != TX_WRITE)
return (0);
/*
* If the block is not readable, don't claim it. This can happen
* in normal operation when a log block is written to disk before
* some of the dmu_sync() blocks it points to. In this case, the
* transaction cannot have been committed to anyone (we would have
* waited for all writes to be stable first), so it is semantically
* correct to declare this the end of the log.
*/
if (lr->lr_blkptr.blk_birth >= first_txg) {
error = zil_read_log_data(zilog, lr, NULL);
if (error != 0)
return (error);
}
return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t claim_txg)
{
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t claim_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
blkptr_t *bp = &lr->lr_blkptr;
/*
* If we previously claimed it, we need to free it.
*/
if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
!BP_IS_HOLE(bp))
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
return (TREE_CMP(v1, v2));
}
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg,
boolean_t fastwrite)
{
lwb_t *lwb;
lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
lwb->lwb_zilog = zilog;
lwb->lwb_blk = *bp;
lwb->lwb_fastwrite = fastwrite;
lwb->lwb_slog = slog;
lwb->lwb_state = LWB_STATE_CLOSED;
lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
lwb->lwb_max_txg = txg;
lwb->lwb_write_zio = NULL;
lwb->lwb_root_zio = NULL;
lwb->lwb_tx = NULL;
lwb->lwb_issued_timestamp = 0;
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
lwb->lwb_nused = sizeof (zil_chain_t);
lwb->lwb_sz = BP_GET_LSIZE(bp);
} else {
lwb->lwb_nused = 0;
lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
}
mutex_enter(&zilog->zl_lock);
list_insert_tail(&zilog->zl_lwb_list, lwb);
mutex_exit(&zilog->zl_lock);
ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
VERIFY(list_is_empty(&lwb->lwb_waiters));
VERIFY(list_is_empty(&lwb->lwb_itxs));
return (lwb);
}
static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
ASSERT(MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
VERIFY(list_is_empty(&lwb->lwb_waiters));
VERIFY(list_is_empty(&lwb->lwb_itxs));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
ASSERT3P(lwb->lwb_write_zio, ==, NULL);
ASSERT3P(lwb->lwb_root_zio, ==, NULL);
ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
ASSERT(lwb->lwb_state == LWB_STATE_CLOSED ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
/*
* Clear the zilog's field to indicate this lwb is no longer
* valid, and prevent use-after-free errors.
*/
if (zilog->zl_last_lwb_opened == lwb)
zilog->zl_last_lwb_opened = NULL;
kmem_cache_free(zil_lwb_cache, lwb);
}
/*
* Called when we create in-memory log transactions so that we know
* to clean up the itxs at the end of spa_sync().
*/
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
ASSERT(spa_writeable(zilog->zl_spa));
if (ds->ds_is_snapshot)
panic("dirtying snapshot!");
if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, zilog);
zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
}
}
/*
* Determine if the zil is dirty in the specified txg. Callers wanting to
* ensure that the dirty state does not change must hold the itxg_lock for
* the specified txg. Holding the lock will ensure that the zil cannot be
* dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
* state.
*/
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
return (B_TRUE);
return (B_FALSE);
}
/*
* Determine if the zil is dirty. The zil is considered dirty if it has
* any pending itx records that have not been cleaned by zil_clean().
*/
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Create an on-disk intent log.
*/
static lwb_t *
zil_create(zilog_t *zilog)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb = NULL;
uint64_t txg = 0;
dmu_tx_t *tx = NULL;
blkptr_t blk;
int error = 0;
boolean_t fastwrite = FALSE;
boolean_t slog = FALSE;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
ASSERT(zh->zh_claim_txg == 0);
ASSERT(zh->zh_replay_seq == 0);
blk = zh->zh_log;
/*
* Allocate an initial log block if:
* - there isn't one already
* - the existing block is the wrong endianness
*/
if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
if (!BP_IS_HOLE(&blk)) {
zio_free(zilog->zl_spa, txg, &blk);
BP_ZERO(&blk);
}
error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
ZIL_MIN_BLKSZ, &slog);
fastwrite = TRUE;
if (error == 0)
zil_init_log_chain(zilog, &blk);
}
/*
* Allocate a log write block (lwb) for the first log block.
*/
if (error == 0)
lwb = zil_alloc_lwb(zilog, &blk, slog, txg, fastwrite);
/*
* If we just allocated the first log block, commit our transaction
* and wait for zil_sync() to stuff the block pointer into zh_log.
* (zh is part of the MOS, so we cannot modify it in open context.)
*/
if (tx != NULL) {
dmu_tx_commit(tx);
txg_wait_synced(zilog->zl_dmu_pool, txg);
}
ASSERT(error != 0 || bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
IMPLY(error == 0, lwb != NULL);
return (lwb);
}
/*
* In one tx, free all log blocks and clear the log header. If keep_first
* is set, then we're replaying a log with no content. We want to keep the
* first block, however, so that the first synchronous transaction doesn't
* require a txg_wait_synced() in zil_create(). We don't need to
* txg_wait_synced() here either when keep_first is set, because both
* zil_create() and zil_destroy() will wait for any in-progress destroys
* to complete.
*/
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb;
dmu_tx_t *tx;
uint64_t txg;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_old_header = *zh; /* debugging aid */
if (BP_IS_HOLE(&zh->zh_log))
return;
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&zilog->zl_lock);
ASSERT3U(zilog->zl_destroy_txg, <, txg);
zilog->zl_destroy_txg = txg;
zilog->zl_keep_first = keep_first;
if (!list_is_empty(&zilog->zl_lwb_list)) {
ASSERT(zh->zh_claim_txg == 0);
VERIFY(!keep_first);
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
if (lwb->lwb_fastwrite)
metaslab_fastwrite_unmark(zilog->zl_spa,
&lwb->lwb_blk);
list_remove(&zilog->zl_lwb_list, lwb);
if (lwb->lwb_buf != NULL)
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
}
} else if (!keep_first) {
zil_destroy_sync(zilog, tx);
}
mutex_exit(&zilog->zl_lock);
dmu_tx_commit(tx);
}
void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
ASSERT(list_is_empty(&zilog->zl_lwb_list));
(void) zil_parse(zilog, zil_free_log_block,
zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}
int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
dmu_tx_t *tx = txarg;
zilog_t *zilog;
uint64_t first_txg;
zil_header_t *zh;
objset_t *os;
int error;
error = dmu_objset_own_obj(dp, ds->ds_object,
DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
if (error != 0) {
/*
* EBUSY indicates that the objset is inconsistent, in which
* case it cannot have a ZIL.
*/
if (error != EBUSY) {
cmn_err(CE_WARN, "can't open objset for %llu, error %u",
(unsigned long long)ds->ds_object, error);
}
return (0);
}
zilog = dmu_objset_zil(os);
zh = zil_header_in_syncing_context(zilog);
ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
first_txg = spa_min_claim_txg(zilog->zl_spa);
/*
* If the spa_log_state is not set to be cleared, check whether
* the current uberblock is a checkpoint one and if the current
* header has been claimed before moving on.
*
* If the current uberblock is a checkpointed uberblock then
* one of the following scenarios took place:
*
* 1] We are currently rewinding to the checkpoint of the pool.
* 2] We crashed in the middle of a checkpoint rewind but we
* did manage to write the checkpointed uberblock to the
* vdev labels, so when we tried to import the pool again
* the checkpointed uberblock was selected from the import
* procedure.
*
* In both cases we want to zero out all the ZIL blocks, except
* the ones that have been claimed at the time of the checkpoint
* (their zh_claim_txg != 0). The reason is that these blocks
* may be corrupted since we may have reused their locations on
* disk after we took the checkpoint.
*
* We could try to set spa_log_state to SPA_LOG_CLEAR earlier
* when we first figure out whether the current uberblock is
* checkpointed or not. Unfortunately, that would discard all
* the logs, including the ones that are claimed, and we would
* leak space.
*/
if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
(zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)) {
if (!BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_clear_log_block,
zil_noop_log_record, tx, first_txg, B_FALSE);
}
BP_ZERO(&zh->zh_log);
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* If we are not rewinding and opening the pool normally, then
* the min_claim_txg should be equal to the first txg of the pool.
*/
ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));
/*
* Claim all log blocks if we haven't already done so, and remember
* the highest claimed sequence number. This ensures that if we can
* read only part of the log now (e.g. due to a missing device),
* but we can read the entire log later, we will not try to replay
* or destroy beyond the last block we successfully claimed.
*/
ASSERT3U(zh->zh_claim_txg, <=, first_txg);
if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_claim_log_block,
zil_claim_log_record, tx, first_txg, B_FALSE);
zh->zh_claim_txg = first_txg;
zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
zh->zh_flags |= ZIL_REPLAY_NEEDED;
zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
}
ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* Check the log by walking the log chain.
* Checksum errors are ok as they indicate the end of the chain.
* Any other error (no device or read failure) returns an error.
*/
/* ARGSUSED */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
zilog_t *zilog;
objset_t *os;
blkptr_t *bp;
int error;
ASSERT(tx == NULL);
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
cmn_err(CE_WARN, "can't open objset %llu, error %d",
(unsigned long long)ds->ds_object, error);
return (0);
}
zilog = dmu_objset_zil(os);
bp = (blkptr_t *)&zilog->zl_header->zh_log;
if (!BP_IS_HOLE(bp)) {
vdev_t *vd;
boolean_t valid = B_TRUE;
/*
* Check the first block and determine if it's on a log device
* which may have been removed or faulted prior to loading this
* pool. If so, there's no point in checking the rest of the
* log as its content should have already been synced to the
* pool.
*/
spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
if (vd->vdev_islog && vdev_is_dead(vd))
valid = vdev_log_state_valid(vd);
spa_config_exit(os->os_spa, SCL_STATE, FTAG);
if (!valid)
return (0);
/*
* Check whether the current uberblock is checkpointed (e.g.
* we are rewinding) and whether the current header has been
* claimed or not. If it hasn't then skip verifying it. We
* do this because its ZIL blocks may be part of the pool's
* state before the rewind, which is no longer valid.
*/
zil_header_t *zh = zil_header_in_syncing_context(zilog);
if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)
return (0);
}
/*
* Because tx == NULL, zil_claim_log_block() will not actually claim
* any blocks, but just determine whether it is possible to do so.
* In addition to checking the log chain, zil_claim_log_block()
* will invoke zio_claim() with a done func of spa_claim_notify(),
* which will update spa_max_claim_txg. See spa_load() for details.
*/
error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
zilog->zl_header->zh_claim_txg ? -1ULL :
spa_min_claim_txg(os->os_spa), B_FALSE);
return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
/*
* When an itx is "skipped", this function is used to properly mark the
* waiter as "done, and signal any thread(s) waiting on it. An itx can
* be skipped (and not committed to an lwb) for a variety of reasons,
* one of them being that the itx was committed via spa_sync(), prior to
* it being committed to an lwb; this can happen if a thread calling
* zil_commit() is racing with spa_sync().
*/
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
mutex_enter(&zcw->zcw_lock);
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
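/*
 * Illustrative sketch (not part of OpenZFS): the waiter hand-off above
 * is the classic "done flag plus condition variable, both under one
 * mutex" pattern. A minimal pthreads rendition with hypothetical toy_*
 * names; the kernel code uses kmutex_t/kcondvar_t rather than pthreads.
 */
#include <pthread.h>
#include <stdio.h>

typedef struct toy_waiter {
	pthread_mutex_t tw_lock;
	pthread_cond_t tw_cv;
	int tw_done;
} toy_waiter_t;

static void
toy_waiter_signal(toy_waiter_t *tw)
{
	pthread_mutex_lock(&tw->tw_lock);
	tw->tw_done = 1;
	pthread_cond_broadcast(&tw->tw_cv);
	pthread_mutex_unlock(&tw->tw_lock);
}

static void
toy_waiter_wait(toy_waiter_t *tw)
{
	pthread_mutex_lock(&tw->tw_lock);
	while (!tw->tw_done)
		pthread_cond_wait(&tw->tw_cv, &tw->tw_lock);
	pthread_mutex_unlock(&tw->tw_lock);
}

static void *
toy_completer(void *arg)
{
	toy_waiter_signal(arg);
	return (NULL);
}

int
main(void)
{
	toy_waiter_t tw = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};
	pthread_t tid;

	pthread_create(&tid, NULL, toy_completer, &tw);
	toy_waiter_wait(&tw);	/* returns once the completer signals */
	pthread_join(tid, NULL);
	printf("waiter done\n");
	return (0);
}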
/*
* This function is used when the given waiter is to be linked into an
* lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb.
* At this point, the waiter will no longer be referenced by the itx,
* and instead, will be referenced by the lwb.
*/
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
/*
* The lwb_waiters field of the lwb is protected by the zilog's
* zl_lock, thus it must be held when calling this function.
*/
ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3P(lwb, !=, NULL);
ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE);
list_insert_tail(&lwb->lwb_waiters, zcw);
zcw->zcw_lwb = lwb;
mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when zio_alloc_zil() fails to allocate a ZIL
* block, and the given waiter must be linked to the "nolwb waiters"
* list inside of zil_process_commit_list().
*/
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
list_insert_tail(nolwb, zcw);
mutex_exit(&zcw->zcw_lock);
}
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
avl_tree_t *t = &lwb->lwb_vdev_tree;
avl_index_t where;
zil_vdev_node_t *zv, zvsearch;
int ndvas = BP_GET_NDVAS(bp);
int i;
if (zil_nocacheflush)
return;
mutex_enter(&lwb->lwb_vdev_lock);
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
zv->zv_vdev = zvsearch.zv_vdev;
avl_insert(t, zv, where);
}
}
mutex_exit(&lwb->lwb_vdev_lock);
}
static void
zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
{
avl_tree_t *src = &lwb->lwb_vdev_tree;
avl_tree_t *dst = &nlwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
/*
* 'lwb' is at a point in its lifetime where its lwb_vdev_tree no longer
* needs the protection of lwb_vdev_lock (the tree will only be modified
* while holding zilog->zl_lock), since its writes and those of its
* children have all completed. The younger 'nlwb', however, may still be
* waiting on future writes to additional vdevs.
*/
mutex_enter(&nlwb->lwb_vdev_lock);
/*
* Tear down the 'lwb' vdev tree, ensuring that entries which do not
* exist in 'nlwb' are moved to it, freeing any would-be duplicates.
*/
while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
avl_index_t where;
if (avl_find(dst, zv, &where) == NULL) {
avl_insert(dst, zv, where);
} else {
kmem_free(zv, sizeof (*zv));
}
}
mutex_exit(&nlwb->lwb_vdev_lock);
}
void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}
/*
* This function is called after all vdevs associated with a given lwb
* write have completed their DKIOCFLUSHWRITECACHE command; or as soon
* as the lwb write completes, if "zil_nocacheflush" is set. Further,
* all "previous" lwb's will have completed before this function is
* called; i.e. this function is called for all previous lwbs before
* it's called for "this" lwb (enforced via zio the dependencies
* configured in zil_lwb_set_zio_dependency()).
*
* The intention is for this function to be called as soon as the
* contents of an lwb are considered "stable" on disk, and will survive
* any sudden loss of power. At this point, any threads waiting for the
* lwb to reach this state are signalled, and the "waiter" structures
* are marked "done".
*/
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
zilog_t *zilog = lwb->lwb_zilog;
dmu_tx_t *tx = lwb->lwb_tx;
zil_commit_waiter_t *zcw;
itx_t *itx;
spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
mutex_enter(&zilog->zl_lock);
/*
* Ensure the lwb buffer pointer is cleared before releasing the
* txg. If we have had an allocation failure and the txg is
* waiting to sync then we want zil_sync() to remove the lwb so
* that it's not picked up as the next new one in
* zil_process_commit_list(). zil_sync() will only remove the
* lwb if lwb_buf is null.
*/
lwb->lwb_buf = NULL;
lwb->lwb_tx = NULL;
ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp;
lwb->lwb_root_zio = NULL;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
lwb->lwb_state = LWB_STATE_FLUSH_DONE;
if (zilog->zl_last_lwb_opened == lwb) {
/*
* Remember the highest committed log sequence number
* for ztest. We only update this value when all the log
* writes succeeded, because ztest wants to ASSERT that
* it got the whole log chain.
*/
zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
while ((itx = list_head(&lwb->lwb_itxs)) != NULL) {
list_remove(&lwb->lwb_itxs, itx);
zil_itx_destroy(itx);
}
while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
mutex_enter(&zcw->zcw_lock);
ASSERT(list_link_active(&zcw->zcw_node));
list_remove(&lwb->lwb_waiters, zcw);
ASSERT3P(zcw->zcw_lwb, ==, lwb);
zcw->zcw_lwb = NULL;
zcw->zcw_zio_error = zio->io_error;
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
mutex_exit(&zilog->zl_lock);
/*
* Now that we've written this log block, we have a stable pointer
* to the next block in the chain, so it's OK to let the txg in
* which we allocated the next block sync.
*/
dmu_tx_commit(tx);
}
/*
* This is called when an lwb's write zio completes. The callback's
* purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs
* in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved
* in writing out this specific lwb's data, and in the case that cache
* flushes have been deferred, vdevs involved in writing the data for
* previous lwbs. The writes corresponding to all the vdevs in the
* lwb_vdev_tree will have completed by the time this is called, due to
* the zio dependencies configured in zil_lwb_set_zio_dependency(),
* which takes deferred flushes into account. The lwb will be "done"
* once zil_lwb_flush_vdevs_done() is called, which occurs in the zio
* completion callback for the lwb's root zio.
*/
static void
zil_lwb_write_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
spa_t *spa = zio->io_spa;
zilog_t *zilog = lwb->lwb_zilog;
avl_tree_t *t = &lwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
lwb_t *nlwb;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
ASSERT(!BP_IS_GANG(zio->io_bp));
ASSERT(!BP_IS_HOLE(zio->io_bp));
ASSERT(BP_GET_FILL(zio->io_bp) == 0);
abd_free(zio->io_abd);
mutex_enter(&zilog->zl_lock);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
lwb->lwb_state = LWB_STATE_WRITE_DONE;
lwb->lwb_write_zio = NULL;
lwb->lwb_fastwrite = FALSE;
nlwb = list_next(&zilog->zl_lwb_list, lwb);
mutex_exit(&zilog->zl_lock);
if (avl_numnodes(t) == 0)
return;
/*
* If there was an IO error, we're not going to call zio_flush()
* on these vdevs, so we simply empty the tree and free the
* nodes. We avoid calling zio_flush() since there isn't any
* good reason for doing so, after the lwb block failed to be
* written out.
*/
if (zio->io_error != 0) {
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zv, sizeof (*zv));
return;
}
/*
* If this lwb does not have any threads waiting for it to
* complete, we want to defer issuing the DKIOCFLUSHWRITECACHE
* command to the vdevs written to by "this" lwb, and instead
* rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE
* command for those vdevs. Thus, we merge the vdev tree of
* "this" lwb with the vdev tree of the "next" lwb in the list,
* and assume the "next" lwb will handle flushing the vdevs (or
* deferring the flush(es) again).
*
* This is a useful performance optimization, especially for
* workloads with lots of async write activity and few sync
* write and/or fsync activity, as it has the potential to
* coalesce multiple flush commands to a vdev into one.
*/
if (list_head(&lwb->lwb_waiters) == NULL && nlwb != NULL) {
zil_lwb_flush_defer(lwb, nlwb);
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
return;
}
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
if (vd != NULL)
zio_flush(lwb->lwb_root_zio, vd);
kmem_free(zv, sizeof (*zv));
}
}
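/*
 * Illustrative sketch (not part of OpenZFS): deferring flushes amounts
 * to folding one lwb's set of touched vdev ids into the next lwb's set,
 * so that each vdev is flushed at most once. A toy set-union over small
 * integer ids, with hypothetical toy_* names:
 */
#include <stdio.h>

#define	TOY_MAX_VDEVS	8

typedef struct toy_vdev_set {
	int tvs_present[TOY_MAX_VDEVS];	/* 1 if the vdev id is in the set */
} toy_vdev_set_t;

static void
toy_set_add(toy_vdev_set_t *s, int vdev)
{
	if (vdev >= 0 && vdev < TOY_MAX_VDEVS)
		s->tvs_present[vdev] = 1;
}

/* Move every vdev in 'src' into 'dst'; duplicates collapse naturally. */
static void
toy_flush_defer(toy_vdev_set_t *dst, toy_vdev_set_t *src)
{
	for (int i = 0; i < TOY_MAX_VDEVS; i++) {
		if (src->tvs_present[i]) {
			dst->tvs_present[i] = 1;
			src->tvs_present[i] = 0;
		}
	}
}

int
main(void)
{
	toy_vdev_set_t prev = { { 0 } }, next = { { 0 } };

	toy_set_add(&prev, 1);
	toy_set_add(&prev, 3);
	toy_set_add(&next, 3);		/* overlaps with prev */
	toy_set_add(&next, 5);
	toy_flush_defer(&next, &prev);

	for (int i = 0; i < TOY_MAX_VDEVS; i++)
		if (next.tvs_present[i])
			printf("flush vdev %d once\n", i);
	return (0);
}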
static void
zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
{
lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zilog->zl_lock));
/*
* The zilog's "zl_last_lwb_opened" field is used to build the
* lwb/zio dependency chain, which is used to preserve the
* ordering of lwb completions that is required by the semantics
* of the ZIL. Each new lwb zio becomes a parent of the
* "previous" lwb zio, such that the new lwb's zio cannot
* complete until the "previous" lwb's zio completes.
*
* This is required by the semantics of zil_commit(); the commit
* waiters attached to the lwbs will be woken in the lwb zio's
* completion callback, so this zio dependency graph ensures the
* waiters are woken in the correct order (the same order the
* lwbs were created).
*/
if (last_lwb_opened != NULL &&
last_lwb_opened->lwb_state != LWB_STATE_FLUSH_DONE) {
ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
last_lwb_opened->lwb_state == LWB_STATE_ISSUED ||
last_lwb_opened->lwb_state == LWB_STATE_WRITE_DONE);
ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
zio_add_child(lwb->lwb_root_zio,
last_lwb_opened->lwb_root_zio);
/*
* If the previous lwb's write hasn't already completed,
* we also want to order the completion of the lwb write
* zios (above, we only order the completion of the lwb
* root zios). This is required because of how we can
* defer the DKIOCFLUSHWRITECACHE commands for each lwb.
*
* When the DKIOCFLUSHWRITECACHE commands are deferred,
* the previous lwb will rely on this lwb to flush the
* vdevs written to by that previous lwb. Thus, we need
* to ensure this lwb doesn't issue the flush until
* after the previous lwb's write completes. We ensure
* this ordering by setting the zio parent/child
* relationship here.
*
* Without this relationship on the lwb's write zio,
* it's possible for this lwb's write to complete prior
* to the previous lwb's write completing; and thus, the
* vdevs for the previous lwb would be flushed prior to
* that lwb's data being written to those vdevs (the
* vdevs are flushed in the lwb write zio's completion
* handler, zil_lwb_write_done()).
*/
if (last_lwb_opened->lwb_state != LWB_STATE_WRITE_DONE) {
ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
last_lwb_opened->lwb_state == LWB_STATE_ISSUED);
ASSERT3P(last_lwb_opened->lwb_write_zio, !=, NULL);
zio_add_child(lwb->lwb_write_zio,
last_lwb_opened->lwb_write_zio);
}
}
}
/*
* This function's purpose is to "open" an lwb such that it is ready to
* accept new itxs being committed to it. To do this, the lwb's zio
* structures are created, and linked to the lwb. This function is
* idempotent; if the passed in lwb has already been opened, this
* function is essentially a no-op.
*/
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
zbookmark_phys_t zb;
zio_priority_t prio;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);
SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
/* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
mutex_enter(&zilog->zl_lock);
if (lwb->lwb_root_zio == NULL) {
abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
BP_GET_LSIZE(&lwb->lwb_blk));
if (!lwb->lwb_fastwrite) {
metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
lwb->lwb_fastwrite = 1;
}
if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
prio = ZIO_PRIORITY_SYNC_WRITE;
else
prio = ZIO_PRIORITY_ASYNC_WRITE;
lwb->lwb_root_zio = zio_root(zilog->zl_spa,
zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio,
zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd,
BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb,
prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_FASTWRITE, &zb);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
lwb->lwb_state = LWB_STATE_OPENED;
zil_lwb_set_zio_dependency(zilog, lwb);
zilog->zl_last_lwb_opened = lwb;
}
mutex_exit(&zilog->zl_lock);
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
}
/*
* Define a limited set of intent log block sizes.
*
* These must be a multiple of 4KB. Note only the amount used (again
* aligned to 4KB) actually gets written. However, we can't always just
* allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
*/
struct {
uint64_t limit;
uint64_t blksz;
} zil_block_buckets[] = {
{ 4096, 4096 }, /* non TX_WRITE */
{ 8192 + 4096, 8192 + 4096 }, /* database */
{ 32768 + 4096, 32768 + 4096 }, /* NFS writes */
{ 65536 + 4096, 65536 + 4096 }, /* 64KB writes */
{ 131072, 131072 }, /* < 128KB writes */
{ 131072 + 4096, 65536 + 4096 }, /* 128KB writes */
{ UINT64_MAX, SPA_OLD_MAXBLOCKSIZE}, /* > 128KB writes */
};
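/*
 * Illustrative sketch (not part of OpenZFS): how the next block size is
 * chosen from a bucket table like the one above -- first the smallest
 * bucket that fits, then the maximum over a few previous picks so that
 * alternating small/large workloads don't whipsaw the block size (the
 * "picket fence" smoothing described in zil_lwb_write_issue() below).
 * Bucket values and toy_* names here are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define	TOY_PREV_BLKS	4

static const uint64_t toy_buckets[] = { 4096, 12288, 36864, 131072 };
static uint64_t toy_prev[TOY_PREV_BLKS];
static int toy_rotor;

static uint64_t
toy_pick_blksz(uint64_t needed)
{
	size_t nbuckets = sizeof (toy_buckets) / sizeof (toy_buckets[0]);
	size_t i;
	uint64_t blksz;

	/* Smallest bucket that holds the bytes we expect to write. */
	for (i = 0; i < nbuckets - 1 && needed > toy_buckets[i]; i++)
		continue;
	blksz = toy_buckets[i];

	/* Smooth with recent history to avoid the picket-fence effect. */
	toy_prev[toy_rotor] = blksz;
	toy_rotor = (toy_rotor + 1) % TOY_PREV_BLKS;
	for (i = 0; i < TOY_PREV_BLKS; i++) {
		if (toy_prev[i] > blksz)
			blksz = toy_prev[i];
	}
	return (blksz);
}

int
main(void)
{
	/* Alternating 2K / 64K demands still settle on the larger size. */
	uint64_t sizes[] = { 2048, 65536, 2048, 65536, 2048 };

	for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++)
		printf("need %llu -> pick %llu\n",
		    (unsigned long long)sizes[i],
		    (unsigned long long)toy_pick_blksz(sizes[i]));
	return (0);
}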
/*
* Maximum block size used by the ZIL. This is picked up when the ZIL is
* initialized. Otherwise this should not be used directly; see
* zl_max_block_size instead.
*/
int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
/*
* Start a log block write and advance to the next log block.
* Calls are serialized.
*/
static lwb_t *
zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
{
lwb_t *nlwb = NULL;
zil_chain_t *zilc;
spa_t *spa = zilog->zl_spa;
blkptr_t *bp;
dmu_tx_t *tx;
uint64_t txg;
uint64_t zil_blksz, wsz;
int i, error;
boolean_t slog;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
zilc = (zil_chain_t *)lwb->lwb_buf;
bp = &zilc->zc_next_blk;
} else {
zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
bp = &zilc->zc_next_blk;
}
ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
/*
* Allocate the next block and save its address in this block
* before writing it in order to establish the log chain.
* Note that if the allocation of nlwb synced before we wrote
* the block that points at it (lwb), we'd leak it if we crashed.
* Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
* We dirty the dataset to ensure that zil_sync() will be called
* to clean up in the event of allocation failure or I/O failure.
*/
tx = dmu_tx_create(zilog->zl_os);
/*
* Since we are not going to create any new dirty data, and we
* can even help with clearing the existing dirty data, we
* should not be subject to the dirty data based delays. We
* use TXG_NOTHROTTLE to bypass the delay mechanism.
*/
VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
lwb->lwb_tx = tx;
/*
* Log blocks are pre-allocated. Here we select the size of the next
* block, based on size used in the last block.
* - first find the smallest bucket that will fit the block from a
* limited set of block sizes. This is because it's faster to write
* blocks allocated from the same metaslab as they are adjacent or
* close.
* - next find the maximum from the new suggested size and an array of
* previous sizes. This lessens a picket fence effect of wrongly
* guessing the size if we have a stream of say 2k, 64k, 2k, 64k
* requests.
*
* Note we only write what is used, but we can't just allocate
* the maximum block size because we can exhaust the available
* pool log space.
*/
zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++)
continue;
zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size);
zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
for (i = 0; i < ZIL_PREV_BLKS; i++)
zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
BP_ZERO(bp);
error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog);
if (slog) {
ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
} else {
ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
}
if (error == 0) {
ASSERT3U(bp->blk_birth, ==, txg);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
/*
* Allocate a new log write block (lwb).
*/
nlwb = zil_alloc_lwb(zilog, bp, slog, txg, TRUE);
}
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
/* For Slim ZIL only write what is used. */
wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
ASSERT3U(wsz, <=, lwb->lwb_sz);
zio_shrink(lwb->lwb_write_zio, wsz);
} else {
wsz = lwb->lwb_sz;
}
zilc->zc_pad = 0;
zilc->zc_nused = lwb->lwb_nused;
zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
/*
* clear unused data for security
*/
bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER);
zil_lwb_add_block(lwb, &lwb->lwb_blk);
lwb->lwb_issued_timestamp = gethrtime();
lwb->lwb_state = LWB_STATE_ISSUED;
zio_nowait(lwb->lwb_root_zio);
zio_nowait(lwb->lwb_write_zio);
/*
* If there was an allocation failure then nlwb will be null which
* forces a txg_wait_synced().
*/
return (nlwb);
}
/*
* Maximum amount of write data that can be put into single log block.
*/
uint64_t
zil_max_log_data(zilog_t *zilog)
{
return (zilog->zl_max_block_size -
sizeof (zil_chain_t) - sizeof (lr_write_t));
}
/*
* Maximum amount of log space we agree to waste to reduce number of
* WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%).
*/
static inline uint64_t
zil_max_waste_space(zilog_t *zilog)
{
return (zil_max_log_data(zilog) / 8);
}
/*
* Maximum amount of write data for WR_COPIED. For correctness, consumers
* must fall back to WR_NEED_COPY if we can't fit the entire record into one
* maximum sized log block, because each WR_COPIED record must fit in a
* single log block. For space efficiency, we want to fit two records into a
* max-sized log block.
*/
uint64_t
zil_max_copied_data(zilog_t *zilog)
{
return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 -
sizeof (lr_write_t));
}
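/*
 * Illustrative sketch (not part of OpenZFS): the three limits above,
 * worked through with stand-in numbers to make the arithmetic concrete.
 * The header sizes used here are hypothetical round figures, not the
 * real sizeof (zil_chain_t) or sizeof (lr_write_t).
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t max_block = 131072;	/* assumed zl_max_block_size */
	uint64_t toy_chain_hdr = 192;	/* stand-in for sizeof (zil_chain_t) */
	uint64_t toy_write_hdr = 192;	/* stand-in for sizeof (lr_write_t) */

	/* Largest single write payload a log block can carry. */
	uint64_t max_log_data = max_block - toy_chain_hdr - toy_write_hdr;

	/* Space we are willing to waste before forcing a new block. */
	uint64_t max_waste = max_log_data / 8;

	/* WR_COPIED cap: two whole records should fit in one max block. */
	uint64_t max_copied = (max_block - toy_chain_hdr) / 2 - toy_write_hdr;

	printf("max_log_data = %llu\n", (unsigned long long)max_log_data);
	printf("max_waste    = %llu\n", (unsigned long long)max_waste);
	printf("max_copied   = %llu\n", (unsigned long long)max_copied);
	return (0);
}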
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
lr_t *lrcb, *lrc;
lr_write_t *lrwb, *lrw;
char *lr_buf;
- uint64_t dlen, dnow, lwb_sp, reclen, txg, max_log_data;
+ uint64_t dlen, dnow, dpad, lwb_sp, reclen, txg, max_log_data;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
ASSERT3P(lwb->lwb_buf, !=, NULL);
zil_lwb_write_open(zilog, lwb);
lrc = &itx->itx_lr;
lrw = (lr_write_t *)lrc;
/*
* A commit itx doesn't represent any on-disk state; instead
* it's simply used as a place holder on the commit list, and
* provides a mechanism for attaching a "commit waiter" onto the
* correct lwb (such that the waiter can be signalled upon
* completion of that lwb). Thus, we don't process this itx's
* log record if it's a commit itx (these itx's don't have log
* records), and instead link the itx's waiter onto the lwb's
* list of waiters.
*
* For more details, see the comment above zil_commit().
*/
if (lrc->lrc_txtype == TX_COMMIT) {
mutex_enter(&zilog->zl_lock);
zil_commit_waiter_link_lwb(itx->itx_private, lwb);
itx->itx_private = NULL;
mutex_exit(&zilog->zl_lock);
return (lwb);
}
if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
dlen = P2ROUNDUP_TYPED(
lrw->lr_length, sizeof (uint64_t), uint64_t);
+ dpad = dlen - lrw->lr_length;
} else {
- dlen = 0;
+ dlen = dpad = 0;
}
reclen = lrc->lrc_reclen;
zilog->zl_cur_used += (reclen + dlen);
txg = lrc->lrc_txg;
ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen));
cont:
/*
* If this record won't fit in the current log block, start a new one.
* For WR_NEED_COPY optimize layout for minimal number of chunks.
*/
lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
max_log_data = zil_max_log_data(zilog);
if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
lwb_sp < zil_max_waste_space(zilog) &&
(dlen % max_log_data == 0 ||
lwb_sp < reclen + dlen % max_log_data))) {
lwb = zil_lwb_write_issue(zilog, lwb);
if (lwb == NULL)
return (NULL);
zil_lwb_write_open(zilog, lwb);
ASSERT(LWB_EMPTY(lwb));
lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
/*
* There must be enough space in the new, empty log block to
* hold reclen. For WR_COPIED, we need to fit the whole
* record in one block, and reclen is the header size + the
* data size. For WR_NEED_COPY, we can create multiple
* records, splitting the data into multiple blocks, so we
* only need to fit one word of data per block; in this case
* reclen is just the header size (no data).
*/
ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
}
dnow = MIN(dlen, lwb_sp - reclen);
lr_buf = lwb->lwb_buf + lwb->lwb_nused;
bcopy(lrc, lr_buf, reclen);
lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */
lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */
ZIL_STAT_BUMP(zil_itx_count);
/*
* If it's a write, fetch the data or get its blkptr as appropriate.
*/
if (lrc->lrc_txtype == TX_WRITE) {
if (txg > spa_freeze_txg(zilog->zl_spa))
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (itx->itx_wr_state == WR_COPIED) {
ZIL_STAT_BUMP(zil_itx_copied_count);
ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length);
} else {
char *dbuf;
int error;
if (itx->itx_wr_state == WR_NEED_COPY) {
dbuf = lr_buf + reclen;
lrcb->lrc_reclen += dnow;
if (lrwb->lr_length > dnow)
lrwb->lr_length = dnow;
lrw->lr_offset += dnow;
lrw->lr_length -= dnow;
ZIL_STAT_BUMP(zil_itx_needcopy_count);
ZIL_STAT_INCR(zil_itx_needcopy_bytes, dnow);
} else {
ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
dbuf = NULL;
ZIL_STAT_BUMP(zil_itx_indirect_count);
ZIL_STAT_INCR(zil_itx_indirect_bytes,
lrw->lr_length);
}
/*
* We pass in the "lwb_write_zio" rather than
* "lwb_root_zio" so that the "lwb_write_zio"
* becomes the parent of any zio's created by
* the "zl_get_data" callback. The vdevs are
* flushed after the "lwb_write_zio" completes,
* so we want to make sure that completion
* callback waits for these additional zio's,
* such that the vdevs used by those zio's will
* be included in the lwb's vdev tree, and those
* vdevs will be properly flushed. If we passed
* in "lwb_root_zio" here, then these additional
* vdevs may not be flushed; e.g. if these zio's
* completed after "lwb_write_zio" completed.
*/
error = zilog->zl_get_data(itx->itx_private,
itx->itx_gen, lrwb, dbuf, lwb,
lwb->lwb_write_zio);
+ if (dbuf != NULL && error == 0 && dnow == dlen)
+ /* Zero any padding bytes in the last block. */
+ bzero((char *)dbuf + lrwb->lr_length, dpad);
if (error == EIO) {
txg_wait_synced(zilog->zl_dmu_pool, txg);
return (lwb);
}
if (error != 0) {
ASSERT(error == ENOENT || error == EEXIST ||
error == EALREADY);
return (lwb);
}
}
}
/*
* We're actually making an entry, so update lrc_seq to be the
* log record sequence number. Note that this is generally not
* equal to the itx sequence number because not all transactions
* are synchronous, and sometimes spa_sync() gets there first.
*/
lrcb->lrc_seq = ++zilog->zl_lr_seq;
lwb->lwb_nused += reclen + dnow;
zil_lwb_add_txg(lwb, txg);
ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
dlen -= dnow;
if (dlen > 0) {
zilog->zl_cur_used += reclen;
goto cont;
}
return (lwb);
}
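/*
 * Illustrative sketch (not part of OpenZFS): the cont: loop above splits
 * a WR_NEED_COPY payload across as many log blocks as needed, writing a
 * record header plus as much data as fits into each block. A toy version
 * that only counts how many records a payload produces; block and header
 * sizes and toy_* names are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define	TOY_BLK_SZ	4096	/* usable bytes per toy log block */
#define	TOY_HDR_SZ	192	/* toy record header size */

/* Return the number of log records needed to store 'dlen' data bytes. */
static int
toy_split_record(uint64_t dlen)
{
	uint64_t space = TOY_BLK_SZ;	/* space left in the current block */
	int nrecords = 0;

	while (dlen > 0) {
		if (space < TOY_HDR_SZ + 1)
			space = TOY_BLK_SZ;	/* start a new block */
		uint64_t chunk = space - TOY_HDR_SZ;
		if (chunk > dlen)
			chunk = dlen;
		dlen -= chunk;
		space -= TOY_HDR_SZ + chunk;
		nrecords++;
	}
	return (nrecords);
}

int
main(void)
{
	printf("records for 1000 bytes:  %d\n", toy_split_record(1000));
	printf("records for 10000 bytes: %d\n", toy_split_record(10000));
	return (0);
}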
itx_t *
-zil_itx_create(uint64_t txtype, size_t lrsize)
+zil_itx_create(uint64_t txtype, size_t olrsize)
{
- size_t itxsize;
+ size_t itxsize, lrsize;
itx_t *itx;
- lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
+ lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t);
itxsize = offsetof(itx_t, itx_lr) + lrsize;
itx = zio_data_buf_alloc(itxsize);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_lr.lrc_seq = 0; /* defensive */
+ bzero((char *)&itx->itx_lr + olrsize, lrsize - olrsize);
itx->itx_sync = B_TRUE; /* default is synchronous */
itx->itx_callback = NULL;
itx->itx_callback_data = NULL;
itx->itx_size = itxsize;
return (itx);
}
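/*
 * Illustrative sketch (not part of OpenZFS): what the new bzero() above
 * accomplishes -- round the requested record size up to an 8-byte
 * boundary, then zero the slack so no uninitialized heap bytes are ever
 * written into the log. The toy_* names and TOY_P2ROUNDUP macro are
 * hypothetical stand-ins for the real allocator and P2ROUNDUP_TYPED.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Round 'x' up to the next multiple of 'align' (a power of two). */
#define	TOY_P2ROUNDUP(x, align)	(((x) + (align) - 1) & ~((size_t)(align) - 1))

static void *
toy_itx_create(size_t olrsize, size_t *lrsizep)
{
	size_t lrsize = TOY_P2ROUNDUP(olrsize, sizeof (uint64_t));
	char *buf = malloc(lrsize);

	if (buf == NULL)
		return (NULL);
	/* Caller fills the first olrsize bytes; zero only the padding. */
	memset(buf + olrsize, 0, lrsize - olrsize);
	*lrsizep = lrsize;
	return (buf);
}

int
main(void)
{
	size_t lrsize;
	void *buf = toy_itx_create(37, &lrsize);

	if (buf != NULL) {
		printf("requested 37 bytes, allocated %zu (pad %zu zeroed)\n",
		    lrsize, lrsize - 37);
		free(buf);
	}
	return (0);
}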
void
zil_itx_destroy(itx_t *itx)
{
IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL);
IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
if (itx->itx_callback != NULL)
itx->itx_callback(itx->itx_callback_data);
zio_data_buf_free(itx, itx->itx_size);
}
/*
* Free up the sync and async itxs. The itxs_t has already been detached
* so no locks are needed.
*/
static void
zil_itxg_clean(void *arg)
{
itx_t *itx;
list_t *list;
avl_tree_t *t;
void *cookie;
itxs_t *itxs = arg;
itx_async_node_t *ian;
list = &itxs->i_sync_list;
while ((itx = list_head(list)) != NULL) {
/*
* In the general case, commit itxs will not be found
* here, as they'll be committed to an lwb via
* zil_lwb_commit(), and free'd in that function. Having
* said that, it is still possible for commit itxs to be
* found here, due to the following race:
*
* - a thread calls zil_commit() which assigns the
* commit itx to a per-txg i_sync_list
* - zil_itxg_clean() is called (e.g. via spa_sync())
* while the waiter is still on the i_sync_list
*
* There's nothing to prevent syncing the txg while the
* waiter is on the i_sync_list. This normally doesn't
* happen because spa_sync() is slower than zil_commit(),
* but if zil_commit() calls txg_wait_synced() (e.g.
* because zil_create() or zil_commit_writer_stall() is
* called) we will hit this case.
*/
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
zil_commit_waiter_skip(itx->itx_private);
list_remove(list, itx);
zil_itx_destroy(itx);
}
cookie = NULL;
t = &itxs->i_async_tree;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list = &ian->ia_list;
while ((itx = list_head(list)) != NULL) {
list_remove(list, itx);
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(list);
kmem_free(ian, sizeof (itx_async_node_t));
}
avl_destroy(t);
kmem_free(itxs, sizeof (itxs_t));
}
static int
zil_aitx_compare(const void *x1, const void *x2)
{
const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
return (TREE_CMP(o1, o2));
}
/*
* Remove all async itx with the given oid.
*/
void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
list_t clean_list;
itx_t *itx;
ASSERT(oid != 0);
list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* Locate the object node and append its list.
*/
t = &itxg->itxg_itxs->i_async_tree;
ian = avl_find(t, &oid, &where);
if (ian != NULL)
list_move_tail(&clean_list, &ian->ia_list);
mutex_exit(&itxg->itxg_lock);
}
while ((itx = list_head(&clean_list)) != NULL) {
list_remove(&clean_list, itx);
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(&clean_list);
}
void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
uint64_t txg;
itxg_t *itxg;
itxs_t *itxs, *clean = NULL;
/*
* Ensure the data of a renamed file is committed before the rename.
*/
if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
zil_async_to_sync(zilog, itx->itx_oid);
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
txg = ZILTEST_TXG;
else
txg = dmu_tx_get_txg(tx);
itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
itxs = itxg->itxg_itxs;
if (itxg->itxg_txg != txg) {
if (itxs != NULL) {
/*
* The zil_clean callback hasn't got around to cleaning
* this itxg. Save the itxs for release below.
* This should be rare.
*/
zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
"txg %llu", (u_longlong_t)itxg->itxg_txg);
clean = itxg->itxg_itxs;
}
itxg->itxg_txg = txg;
itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
KM_SLEEP);
list_create(&itxs->i_sync_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
avl_create(&itxs->i_async_tree, zil_aitx_compare,
sizeof (itx_async_node_t),
offsetof(itx_async_node_t, ia_node));
}
if (itx->itx_sync) {
list_insert_tail(&itxs->i_sync_list, itx);
} else {
avl_tree_t *t = &itxs->i_async_tree;
uint64_t foid =
LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
itx_async_node_t *ian;
avl_index_t where;
ian = avl_find(t, &foid, &where);
if (ian == NULL) {
ian = kmem_alloc(sizeof (itx_async_node_t),
KM_SLEEP);
list_create(&ian->ia_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian->ia_foid = foid;
avl_insert(t, ian, where);
}
list_insert_tail(&ian->ia_list, itx);
}
itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
/*
* We don't want to dirty the ZIL using ZILTEST_TXG, because
* zil_clean() will never be called using ZILTEST_TXG. Thus, we
* need to be careful to always dirty the ZIL using the "real"
* TXG (not itxg_txg) even when the SPA is frozen.
*/
zilog_dirty(zilog, dmu_tx_get_txg(tx));
mutex_exit(&itxg->itxg_lock);
/* Release the old itxs now we've dropped the lock */
if (clean != NULL)
zil_itxg_clean(clean);
}
/*
* If there are any in-memory intent log transactions which have now been
* synced then start up a taskq to free them. We should only do this after we
* have written out the uberblocks (i.e. txg has been committed) so that
* we don't inadvertently clean out in-memory log records that would be required
* by zil_commit().
*/
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
itxs_t *clean_me;
ASSERT3U(synced_txg, <, ZILTEST_TXG);
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
mutex_exit(&itxg->itxg_lock);
return;
}
ASSERT3U(itxg->itxg_txg, <=, synced_txg);
ASSERT3U(itxg->itxg_txg, !=, 0);
clean_me = itxg->itxg_itxs;
itxg->itxg_itxs = NULL;
itxg->itxg_txg = 0;
mutex_exit(&itxg->itxg_lock);
/*
* Preferably start a task queue to free up the old itxs but
* if taskq_dispatch can't allocate resources to do that then
* free it in-line. This should be rare. Note, using TQ_SLEEP
* created a bad performance problem.
*/
ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
zil_itxg_clean, clean_me, TQ_NOSLEEP);
if (id == TASKQID_INVALID)
zil_itxg_clean(clean_me);
}
/*
* This function will traverse the queue of itxs that need to be
* committed, and move them onto the ZIL's zl_itx_commit_list.
*/
static void
zil_get_commit_list(zilog_t *zilog)
{
uint64_t otxg, txg;
list_t *commit_list = &zilog->zl_itx_commit_list;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing. That's okay since we'll
* only commit things in the future.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If we're adding itx records to the zl_itx_commit_list,
* then the zil better be dirty in this "txg". We can assert
* that here since we're holding the itxg_lock which will
* prevent spa_sync from cleaning it. Once we add the itxs
* to the zl_itx_commit_list we must commit it to disk even
* if it's unnecessary (i.e. the txg was synced).
*/
ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
mutex_exit(&itxg->itxg_lock);
}
}
/*
* Move the async itxs for a specified object to commit into sync lists.
*/
void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If a foid is specified then find that node and append its
* list. Otherwise walk the tree appending all the lists
* to the sync list. We add to the end rather than the
* beginning to ensure the create has happened.
*/
t = &itxg->itxg_itxs->i_async_tree;
if (foid != 0) {
ian = avl_find(t, &foid, &where);
if (ian != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
}
} else {
void *cookie = NULL;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
list_destroy(&ian->ia_list);
kmem_free(ian, sizeof (itx_async_node_t));
}
}
mutex_exit(&itxg->itxg_lock);
}
}
/*
* This function will prune commit itxs that are at the head of the
* commit list (it won't prune past the first non-commit itx), and
* either: a) attach them to the last lwb that's still pending
* completion, or b) skip them altogether.
*
* This is used as a performance optimization to prevent commit itxs
* from generating new lwbs when it's unnecessary to do so.
*/
static void
zil_prune_commit_list(zilog_t *zilog)
{
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
if (lrc->lrc_txtype != TX_COMMIT)
break;
mutex_enter(&zilog->zl_lock);
lwb_t *last_lwb = zilog->zl_last_lwb_opened;
if (last_lwb == NULL ||
last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
/*
* All of the itxs this waiter was waiting on
* must have already completed (or there were
* never any itx's for it to wait on), so it's
* safe to skip this waiter and mark it done.
*/
zil_commit_waiter_skip(itx->itx_private);
} else {
zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
itx->itx_private = NULL;
}
mutex_exit(&zilog->zl_lock);
list_remove(&zilog->zl_itx_commit_list, itx);
zil_itx_destroy(itx);
}
IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}
static void
zil_commit_writer_stall(zilog_t *zilog)
{
/*
* When zio_alloc_zil() fails to allocate the next lwb block on
* disk, we must call txg_wait_synced() to ensure all of the
* lwbs in the zilog's zl_lwb_list are synced and then freed (in
* zil_sync()), such that any subsequent ZIL writer (i.e. a call
* to zil_process_commit_list()) will have to call zil_create(),
* and start a new ZIL chain.
*
* Since zio_alloc_zil() failed, the lwb that was previously
* issued does not have a pointer to the "next" lwb on disk.
* Thus, if another ZIL writer thread was to allocate the "next"
* on-disk lwb, that block could be leaked in the event of a
* crash (because the previous lwb on-disk would not point to
* it).
*
* We must hold the zilog's zl_issuer_lock while we do this, to
* ensure no new threads enter zil_process_commit_list() until
* all lwb's in the zl_lwb_list have been synced and freed
* (which is achieved via the txg_wait_synced() call).
*/
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
txg_wait_synced(zilog->zl_dmu_pool, 0);
ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
}
/*
* This function will traverse the commit list, creating new lwbs as
* needed, and committing the itxs from the commit list to these newly
* created lwbs. Additionally, as a new lwb is created, the previous
* lwb will be issued to the zio layer to be written to disk.
*/
static void
zil_process_commit_list(zilog_t *zilog)
{
spa_t *spa = zilog->zl_spa;
list_t nolwb_itxs;
list_t nolwb_waiters;
lwb_t *lwb;
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
/*
* Return if there's nothing to commit before we dirty the fs by
* calling zil_create().
*/
if (list_head(&zilog->zl_itx_commit_list) == NULL)
return;
list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL) {
lwb = zil_create(zilog);
} else {
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
}
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
uint64_t txg = lrc->lrc_txg;
ASSERT3U(txg, !=, 0);
if (lrc->lrc_txtype == TX_COMMIT) {
DTRACE_PROBE2(zil__process__commit__itx,
zilog_t *, zilog, itx_t *, itx);
} else {
DTRACE_PROBE2(zil__process__normal__itx,
zilog_t *, zilog, itx_t *, itx);
}
list_remove(&zilog->zl_itx_commit_list, itx);
boolean_t synced = txg <= spa_last_synced_txg(spa);
boolean_t frozen = txg > spa_freeze_txg(spa);
/*
* If the txg of this itx has already been synced out, then
* we don't need to commit this itx to an lwb. This is
* because the data of this itx will have already been
* written to the main pool. This is inherently racy, and
* it's still ok to commit an itx whose txg has already
* been synced; this will result in a write that's
* unnecessary, but will do no harm.
*
* With that said, we always want to commit TX_COMMIT itxs
* to an lwb, regardless of whether or not that itx's txg
* has been synced out. We do this to ensure any OPENED lwb
* will always have at least one zil_commit_waiter_t linked
* to the lwb.
*
* As a counter-example, if we skipped TX_COMMIT itx's
* whose txg had already been synced, the following
* situation could occur if we happened to be racing with
* spa_sync:
*
* 1. We commit a non-TX_COMMIT itx to an lwb, where the
* itx's txg is 10 and the last synced txg is 9.
* 2. spa_sync finishes syncing out txg 10.
* 3. We move to the next itx in the list, it's a TX_COMMIT
* whose txg is 10, so we skip it rather than committing
* it to the lwb used in (1).
*
* If the itx that is skipped in (3) is the last TX_COMMIT
* itx in the commit list, then it's possible for the lwb
* used in (1) to remain in the OPENED state indefinitely.
*
* To prevent the above scenario from occurring, ensuring
* that once an lwb is OPENED it will transition to ISSUED
* and eventually DONE, we always commit TX_COMMIT itx's to
* an lwb here, even if that itx's txg has already been
* synced.
*
* Finally, if the pool is frozen, we _always_ commit the
* itx. The point of freezing the pool is to prevent data
* from being written to the main pool via spa_sync, and
* instead rely solely on the ZIL to persistently store the
* data; i.e. when the pool is frozen, the last synced txg
* value can't be trusted.
*/
if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
if (lwb != NULL) {
lwb = zil_lwb_commit(zilog, itx, lwb);
if (lwb == NULL)
list_insert_tail(&nolwb_itxs, itx);
else
list_insert_tail(&lwb->lwb_itxs, itx);
} else {
if (lrc->lrc_txtype == TX_COMMIT) {
zil_commit_waiter_link_nolwb(
itx->itx_private, &nolwb_waiters);
}
list_insert_tail(&nolwb_itxs, itx);
}
} else {
ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
}
if (lwb == NULL) {
/*
* This indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this happens, we must stall
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
*/
zil_commit_writer_stall(zilog);
/*
* Additionally, we have to signal and mark the "nolwb"
* waiters as "done" here, since without an lwb, we
* can't do this via zil_lwb_flush_vdevs_done() like
* normal.
*/
zil_commit_waiter_t *zcw;
while ((zcw = list_head(&nolwb_waiters)) != NULL) {
zil_commit_waiter_skip(zcw);
list_remove(&nolwb_waiters, zcw);
}
/*
* And finally, we have to destroy the itx's that
* couldn't be committed to an lwb; this will also call
* the itx's callback if one exists for the itx.
*/
while ((itx = list_head(&nolwb_itxs)) != NULL) {
list_remove(&nolwb_itxs, itx);
zil_itx_destroy(itx);
}
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
/*
* At this point, the ZIL block pointed at by the "lwb"
* variable is in one of the following states: "closed"
* or "open".
*
* If it's "closed", then no itxs have been committed to
* it, so there's no point in issuing its zio (i.e. it's
* "empty").
*
* If it's "open", then it contains one or more itxs that
* eventually need to be committed to stable storage. In
* this case we intentionally do not issue the lwb's zio
* to disk yet, and instead rely on one of the following
* two mechanisms for issuing the zio:
*
* 1. Ideally, there will be more ZIL activity occurring
* on the system, such that this function will be
* immediately called again (not necessarily by the same
* thread) and this lwb's zio will be issued via
* zil_lwb_commit(). This way, the lwb is guaranteed to
* be "full" when it is issued to disk, and we'll make
* use of the lwb's size the best we can.
*
* 2. If there isn't sufficient ZIL activity occurring on
* the system, such that this lwb's zio isn't issued via
* zil_lwb_commit(), zil_commit_waiter() will issue the
* lwb's zio. If this occurs, the lwb is not guaranteed
* to be "full" by the time its zio is issued, which means
* the size of the lwb was "too large" given the amount
* of ZIL activity occurring on the system at that time.
*
* We do this for a couple of reasons:
*
* 1. To try and reduce the number of IOPs needed to
* write the same number of itxs. If an lwb has space
* available in its buffer for more itxs, and more itxs
* will be committed relatively soon (relative to the
* latency of performing a write), then it's beneficial
* to wait for these "next" itxs. This way, more itxs
* can be committed to stable storage with fewer writes.
*
* 2. To try and use the largest lwb block size that the
* incoming rate of itxs can support. Again, this is to
* try and pack as many itxs into as few lwbs as
* possible, without significantly impacting the latency
* of each individual itx.
*/
}
}
/*
* This function is responsible for ensuring the passed in commit waiter
* (and associated commit itx) is committed to an lwb. If the waiter is
* not already committed to an lwb, all itxs in the zilog's queue of
* itxs will be processed. The assumption is the passed in waiter's
* commit itx will be found in the queue just like the other non-commit
* itxs, such that when the entire queue is processed, the waiter will
* have been committed to an lwb.
*
* The lwb associated with the passed in waiter is not guaranteed to
* have been issued by the time this function completes. If the lwb is
* not issued, we rely on future calls to zil_commit_writer() to issue
* the lwb, or the timeout mechanism found in zil_commit_waiter().
*/
static void
zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zilog->zl_issuer_lock);
if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
/*
* It's possible that, while we were waiting to acquire
* the "zl_issuer_lock", another thread committed this
* waiter to an lwb. If that occurs, we bail out early,
* without processing any of the zilog's queue of itxs.
*
* On certain workloads and system configurations, the
* "zl_issuer_lock" can become highly contended. In an
* attempt to reduce this contention, we immediately drop
* the lock if the waiter has already been processed.
*
* We've measured this optimization to reduce CPU spent
* contending on this lock by up to 5%, using a system
* with 32 CPUs, low latency storage (~50 usec writes),
* and 1024 threads performing sync writes.
*/
goto out;
}
ZIL_STAT_BUMP(zil_commit_writer_count);
zil_get_commit_list(zilog);
zil_prune_commit_list(zilog);
zil_process_commit_list(zilog);
out:
mutex_exit(&zilog->zl_issuer_lock);
}
static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
lwb_t *lwb = zcw->zcw_lwb;
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED);
/*
* If the lwb has already been issued by another thread, we can
* immediately return since there's no work to be done (the
* point of this function is to issue the lwb). Additionally, we
* do this prior to acquiring the zl_issuer_lock, to avoid
* acquiring it when it's not necessary to do so.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE)
return;
/*
* In order to call zil_lwb_write_issue() we must hold the
* zilog's "zl_issuer_lock". We can't simply acquire that lock,
* since we're already holding the commit waiter's "zcw_lock",
* and those two locks are acquired in the opposite order
* elsewhere.
*/
mutex_exit(&zcw->zcw_lock);
mutex_enter(&zilog->zl_issuer_lock);
mutex_enter(&zcw->zcw_lock);
/*
* Since we just dropped and re-acquired the commit waiter's
* lock, we have to re-check to see if the waiter was marked
* "done" during that process. If the waiter was marked "done",
* the "lwb" pointer is no longer valid (it can be free'd after
* the waiter is marked "done"), so without this check we could
* wind up with a use-after-free error below.
*/
if (zcw->zcw_done)
goto out;
ASSERT3P(lwb, ==, zcw->zcw_lwb);
/*
* We've already checked this above, but since we hadn't acquired
* the zilog's zl_issuer_lock, we have to perform this check a
* second time while holding the lock.
*
* We don't need to hold the zl_lock since the lwb cannot transition
* from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb
* _can_ transition from ISSUED to DONE, but it's OK to race with
* that transition since we treat the lwb the same, whether it's in
* the ISSUED or DONE states.
*
* The important thing is that we treat the lwb differently depending on
* if it's ISSUED or OPENED, and block any other threads that might
* attempt to issue this lwb. For that reason we hold the
* zl_issuer_lock when checking the lwb_state; we must not call
* zil_lwb_write_issue() if the lwb had already been issued.
*
* See the comment above the lwb_state_t structure definition for
* more details on the lwb states, and locking requirements.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE)
goto out;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
/*
* As described in the comments above zil_commit_waiter() and
* zil_process_commit_list(), we need to issue this lwb's zio
* since we've reached the commit waiter's timeout and it still
* hasn't been issued.
*/
lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb);
IMPLY(nlwb != NULL, lwb->lwb_state != LWB_STATE_OPENED);
/*
* Since the lwb's zio hadn't been issued by the time this thread
* reached its timeout, we reset the zilog's "zl_cur_used" field
* to influence the zil block size selection algorithm.
*
* By having to issue the lwb's zio here, it means the size of the
* lwb was too large, given the incoming throughput of itxs. By
* setting "zl_cur_used" to zero, we communicate this fact to the
* block size selection algorithm, so it can take this information
* into account, and potentially select a smaller size for the
* next lwb block that is allocated.
*/
zilog->zl_cur_used = 0;
if (nlwb == NULL) {
/*
* When zil_lwb_write_issue() returns NULL, this
* indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this occurs, the ZIL write
* pipeline must be stalled; see the comment within the
* zil_commit_writer_stall() function for more details.
*
* We must drop the commit waiter's lock prior to
* calling zil_commit_writer_stall() or else we can wind
* up with the following deadlock:
*
* - This thread is waiting for the txg to sync while
* holding the waiter's lock; txg_wait_synced() is
* used within zil_commit_writer_stall().
*
* - The txg can't sync because it is waiting for this
* lwb's zio callback to call dmu_tx_commit().
*
* - The lwb's zio callback can't call dmu_tx_commit()
* because it's blocked trying to acquire the waiter's
* lock, which occurs prior to calling dmu_tx_commit()
*/
mutex_exit(&zcw->zcw_lock);
zil_commit_writer_stall(zilog);
mutex_enter(&zcw->zcw_lock);
}
out:
mutex_exit(&zilog->zl_issuer_lock);
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
}
/*
* This function is responsible for performing the following two tasks:
*
* 1. its primary responsibility is to block until the given "commit
* waiter" is considered "done".
*
* 2. its secondary responsibility is to issue the zio for the lwb that
* the given "commit waiter" is waiting on, if this function has
* waited "long enough" and the lwb is still in the "open" state.
*
* Given a sufficient amount of itxs being generated and written using
* the ZIL, the lwb's zio will be issued via the zil_lwb_commit()
* function. If this does not occur, this secondary responsibility will
* ensure the lwb is issued even if there is no other synchronous
* activity on the system.
*
* For more details, see zil_process_commit_list(); more specifically,
* the comment at the bottom of that function.
*/
static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zcw->zcw_lock);
/*
* The timeout is scaled based on the lwb latency to avoid
* significantly impacting the latency of each individual itx.
* For more details, see the comment at the bottom of the
* zil_process_commit_list() function.
*/
int pct = MAX(zfs_commit_timeout_pct, 1);
hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
hrtime_t wakeup = gethrtime() + sleep;
boolean_t timedout = B_FALSE;
while (!zcw->zcw_done) {
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
lwb_t *lwb = zcw->zcw_lwb;
/*
* Usually, the waiter will have a non-NULL lwb field here,
* but it's possible for it to be NULL as a result of
* zil_commit() racing with spa_sync().
*
* When zil_clean() is called, it's possible for the itxg
* list (which may be cleaned via a taskq) to contain
* commit itxs. When this occurs, the commit waiters linked
* off of these commit itxs will not be committed to an
* lwb. Additionally, these commit waiters will not be
* marked done until zil_commit_waiter_skip() is called via
* zil_itxg_clean().
*
* Thus, it's possible for this commit waiter (i.e. the
* "zcw" variable) to be found in this "in between" state;
* where its "zcw_lwb" field is NULL, and it hasn't yet
* been skipped, so its "zcw_done" field is still B_FALSE.
*/
IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED);
if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
ASSERT3B(timedout, ==, B_FALSE);
/*
* If the lwb hasn't been issued yet, then we
* need to wait with a timeout, in case this
* function needs to issue the lwb after the
* timeout is reached; responsibility (2) from
* the comment above this function.
*/
int rc = cv_timedwait_hires(&zcw->zcw_cv,
&zcw->zcw_lock, wakeup, USEC2NSEC(1),
CALLOUT_FLAG_ABSOLUTE);
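/*
 * cv_timedwait_hires() returns -1 only when the absolute
 * deadline expires; any other return means the waiter was
 * signalled, so simply loop around and re-check zcw_done.
 */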
if (rc != -1 || zcw->zcw_done)
continue;
timedout = B_TRUE;
zil_commit_waiter_timeout(zilog, zcw);
if (!zcw->zcw_done) {
/*
* If the commit waiter has already been
* marked "done", it's possible for the
* waiter's lwb structure to have already
* been freed. Thus, we can only reliably
* make these assertions if the waiter
* isn't done.
*/
ASSERT3P(lwb, ==, zcw->zcw_lwb);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
}
} else {
/*
* If the lwb isn't open, then it must have already
* been issued. In that case, there's no need to
* use a timeout when waiting for the lwb to
* complete.
*
* Additionally, if the lwb is NULL, the waiter
* will soon be signaled and marked done via
* zil_clean() and zil_itxg_clean(), so no timeout
* is required.
*/
IMPLY(lwb != NULL,
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
}
}
mutex_exit(&zcw->zcw_lock);
}
static zil_commit_waiter_t *
zil_alloc_commit_waiter(void)
{
zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);
cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&zcw->zcw_node);
zcw->zcw_lwb = NULL;
zcw->zcw_done = B_FALSE;
zcw->zcw_zio_error = 0;
return (zcw);
}
static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3B(zcw->zcw_done, ==, B_TRUE);
mutex_destroy(&zcw->zcw_lock);
cv_destroy(&zcw->zcw_cv);
kmem_cache_free(zil_zcw_cache, zcw);
}
/*
* This function is used to create a TX_COMMIT itx and assign it. This
* way, it will be linked into the ZIL's list of synchronous itxs, and
* then later committed to an lwb (or skipped) when
* zil_process_commit_list() is called.
*/
static void
zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
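/*
 * TXG_WAIT makes dmu_tx_assign() block until the tx joins an
 * open txg rather than returning an error, hence the VERIFY0.
 */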
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
itx->itx_sync = B_TRUE;
itx->itx_private = zcw;
zil_itx_assign(zilog, itx, tx);
dmu_tx_commit(tx);
}
/*
* Commit ZFS Intent Log transactions (itxs) to stable storage.
*
* When writing ZIL transactions to the on-disk representation of the
* ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
* itxs can be committed to a single lwb. Once a lwb is written and
* committed to stable storage (i.e. the lwb is written, and vdevs have
* been flushed), each itx that was committed to that lwb is also
* considered to be committed to stable storage.
*
* When an itx is committed to an lwb, the log record (lr_t) contained
* by the itx is copied into the lwb's zio buffer, and once this buffer
* is written to disk, it becomes an on-disk ZIL block.
*
* As itxs are generated, they're inserted into the ZIL's queue of
* uncommitted itxs. The semantics of zil_commit() are such that it will
* block until all itxs that were in the queue when it was called, are
* committed to stable storage.
*
* If "foid" is zero, this means all "synchronous" and "asynchronous"
* itxs, for all objects in the dataset, will be committed to stable
* storage prior to zil_commit() returning. If "foid" is non-zero, all
* "synchronous" itxs for all objects, but only "asynchronous" itxs
* that correspond to the foid passed in, will be committed to stable
* storage prior to zil_commit() returning.
*
* Generally speaking, when zil_commit() is called, the consumer doesn't
* actually care about _all_ of the uncommitted itxs. Instead, they're
* simply trying to wait for a specific itx to be committed to disk,
* but the interface(s) for interacting with the ZIL don't allow such
* fine-grained communication. A better interface would allow a consumer
* to create and assign an itx, and then pass a reference to this itx to
* zil_commit(); such that zil_commit() would return as soon as that
* specific itx was committed to disk (instead of waiting for _all_
* itxs to be committed).
*
* When a thread calls zil_commit() a special "commit itx" will be
* generated, along with a corresponding "waiter" for this commit itx.
* zil_commit() will wait on this waiter's CV, such that when the waiter
* is marked done, and signaled, zil_commit() will return.
*
* This commit itx is inserted into the queue of uncommitted itxs. This
* provides an easy mechanism for determining which itxs were in the
* queue prior to zil_commit() having been called, and which itxs were
* added after zil_commit() was called.
*
* The commit itx is special; it doesn't have any on-disk representation.
* When a commit itx is "committed" to an lwb, the waiter associated
* with it is linked onto the lwb's list of waiters. Then, when that lwb
* completes, each waiter on the lwb's list is marked done and signaled
* -- allowing the thread waiting on the waiter to return from zil_commit().
*
* It's important to point out a few critical factors that allow us
* to make use of the commit itxs, commit waiters, per-lwb lists of
* commit waiters, and zio completion callbacks like we're doing:
*
* 1. The list of waiters for each lwb is traversed, and each commit
* waiter is marked "done" and signaled, in the zio completion
* callback of the lwb's zio[*].
*
* * Actually, the waiters are signaled in the zio completion
* callback of the root zio for the DKIOCFLUSHWRITECACHE commands
* that are sent to the vdevs upon completion of the lwb zio.
*
* 2. When the itxs are inserted into the ZIL's queue of uncommitted
* itxs, the order in which they are inserted is preserved[*]; as
* itxs are added to the queue, they are added to the tail of
* in-memory linked lists.
*
* When committing the itxs to lwbs (to be written to disk), they
* are committed in the same order in which the itxs were added to
* the uncommitted queue's linked list(s); i.e. the linked list of
* itxs to commit is traversed from head to tail, and each itx is
* committed to an lwb in that order.
*
* * To clarify:
*
* - the order of "sync" itxs is preserved w.r.t. other
* "sync" itxs, regardless of the corresponding objects.
* - the order of "async" itxs is preserved w.r.t. other
* "async" itxs corresponding to the same object.
* - the order of "async" itxs is *not* preserved w.r.t. other
* "async" itxs corresponding to different objects.
* - the order of "sync" itxs w.r.t. "async" itxs (or vice
* versa) is *not* preserved, even for itxs that correspond
* to the same object.
*
* For more details, see: zil_itx_assign(), zil_async_to_sync(),
* zil_get_commit_list(), and zil_process_commit_list().
*
* 3. The lwbs represent a linked list of blocks on disk. Thus, any
* lwb cannot be considered committed to stable storage, until its
* "previous" lwb is also committed to stable storage. This fact,
* coupled with the fact described above, means that itxs are
* committed in (roughly) the order in which they were generated.
* This is essential because itxs are dependent on prior itxs.
* Thus, we *must not* deem an itx as being committed to stable
* storage, until *all* prior itxs have also been committed to
* stable storage.
*
* To enforce this ordering of lwb zio's, while still leveraging as
* much of the underlying storage performance as possible, we rely
* on two fundamental concepts:
*
* 1. The creation and issuance of lwb zio's is protected by
* the zilog's "zl_issuer_lock", which ensures only a single
* thread is creating and/or issuing lwb's at a time
* 2. The "previous" lwb is a child of the "current" lwb
* (leveraging the zio parent-child dependency graph)
*
* By relying on this parent-child zio relationship, we can have
* many lwb zio's concurrently issued to the underlying storage,
* but the order in which they complete will be the same order in
* which they were created.
*/
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
/*
* We should never attempt to call zil_commit on a snapshot for
* a couple of reasons:
*
* 1. A snapshot may never be modified, thus it cannot have any
* in-flight itxs that would have modified the dataset.
*
* 2. By design, when zil_commit() is called, a commit itx will
* be assigned to this zilog; as a result, the zilog will be
* dirtied. We must not dirty the zilog of a snapshot; there's
* checks in the code that enforce this invariant, and will
* cause a panic if it's not upheld.
*/
ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return;
if (!spa_writeable(zilog->zl_spa)) {
/*
* If the SPA is not writable, there should never be any
* pending itxs waiting to be committed to disk. If that
* weren't true, we'd skip writing those itxs out, and
* would break the semantics of zil_commit(); thus, we're
* verifying that truth before we return to the caller.
*/
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
for (int i = 0; i < TXG_SIZE; i++)
ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
return;
}
/*
* If the ZIL is suspended, we don't want to dirty it by calling
* zil_commit_itx_assign() below, nor can we write out
* lwbs as would be done in zil_commit_writer(). Thus, we
* simply rely on txg_wait_synced() to maintain the necessary
* semantics, and avoid calling those functions altogether.
*/
if (zilog->zl_suspend > 0) {
txg_wait_synced(zilog->zl_dmu_pool, 0);
return;
}
zil_commit_impl(zilog, foid);
}
void
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
ZIL_STAT_BUMP(zil_commit_count);
/*
* Move the "async" itxs for the specified foid to the "sync"
* queues, such that they will be later committed (or skipped)
* to an lwb when zil_process_commit_list() is called.
*
* Since these "async" itxs must be committed prior to this
* call to zil_commit returning, we must perform this operation
* before we call zil_commit_itx_assign().
*/
zil_async_to_sync(zilog, foid);
/*
* We allocate a new "waiter" structure which will initially be
* linked to the commit itx using the itx's "itx_private" field.
* Since the commit itx doesn't represent any on-disk state,
* when it's committed to an lwb, rather than copying its
* lr_t into the lwb's buffer, the commit itx's "waiter" will be
* added to the lwb's list of waiters. Then, when the lwb is
* committed to stable storage, each waiter in the lwb's list of
* waiters will be marked "done", and signalled.
*
* We must create the waiter and assign the commit itx prior to
* calling zil_commit_writer(), or else our specific commit itx
* is not guaranteed to be committed to an lwb prior to calling
* zil_commit_waiter().
*/
zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
zil_commit_itx_assign(zilog, zcw);
zil_commit_writer(zilog, zcw);
zil_commit_waiter(zilog, zcw);
if (zcw->zcw_zio_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
* this thread is waiting on, then we fallback to
* relying on spa_sync() to write out the data this
* thread is waiting on. Obviously this has performance
* implications, but the expectation is for this to be
* an exceptional case, and shouldn't occur often.
*/
DTRACE_PROBE2(zil__commit__io__error,
zilog_t *, zilog, zil_commit_waiter_t *, zcw);
txg_wait_synced(zilog->zl_dmu_pool, 0);
}
zil_free_commit_waiter(zcw);
}
/*
* Called in syncing context to free committed log blocks and update log header.
*/
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
zil_header_t *zh = zil_header_in_syncing_context(zilog);
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = zilog->zl_spa;
uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
lwb_t *lwb;
/*
* We don't zero out zl_destroy_txg, so make sure we don't try
* to destroy it twice.
*/
if (spa_sync_pass(spa) != 1)
return;
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_stop_sync == 0);
if (*replayed_seq != 0) {
ASSERT(zh->zh_replay_seq < *replayed_seq);
zh->zh_replay_seq = *replayed_seq;
*replayed_seq = 0;
}
if (zilog->zl_destroy_txg == txg) {
blkptr_t blk = zh->zh_log;
ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
bzero(zh, sizeof (zil_header_t));
bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
if (zilog->zl_keep_first) {
/*
* If this block was part of a log chain that couldn't
* be claimed because a device was missing during
* zil_claim(), but that device later returns,
* then this block could erroneously appear valid.
* To guard against this, assign a new GUID to the new
* log chain so it doesn't matter what blk points to.
*/
zil_init_log_chain(zilog, &blk);
zh->zh_log = blk;
}
}
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
zh->zh_log = lwb->lwb_blk;
if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
break;
list_remove(&zilog->zl_lwb_list, lwb);
zio_free(spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
/*
* If we don't have anything left in the lwb list then
* we've had an allocation failure and we need to zero
* out the zil_header blkptr so that we don't end
* up freeing the same block twice.
*/
if (list_head(&zilog->zl_lwb_list) == NULL)
BP_ZERO(&zh->zh_log);
}
/*
* Remove fastwrite on any blocks that have been pre-allocated for
* the next commit. This prevents fastwrite counter pollution by
* unused, long-lived LWBs.
*/
for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) {
if (lwb->lwb_fastwrite && !lwb->lwb_write_zio) {
metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
lwb->lwb_fastwrite = 0;
}
}
mutex_exit(&zilog->zl_lock);
}
/* ARGSUSED */
static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
lwb_t *lwb = vbuf;
list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
/* ARGSUSED */
static void
zil_lwb_dest(void *vbuf, void *unused)
{
lwb_t *lwb = vbuf;
mutex_destroy(&lwb->lwb_vdev_lock);
avl_destroy(&lwb->lwb_vdev_tree);
list_destroy(&lwb->lwb_waiters);
list_destroy(&lwb->lwb_itxs);
}
void
zil_init(void)
{
zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);
zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zil_ksp = kstat_create("zfs", 0, "zil", "misc",
KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
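/*
 * The kstat is optional; if kstat_create() fails the ZIL still
 * functions normally, just without exported statistics.
 */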
if (zil_ksp != NULL) {
zil_ksp->ks_data = &zil_stats;
kstat_install(zil_ksp);
}
}
void
zil_fini(void)
{
kmem_cache_destroy(zil_zcw_cache);
kmem_cache_destroy(zil_lwb_cache);
if (zil_ksp != NULL) {
kstat_delete(zil_ksp);
zil_ksp = NULL;
}
}
void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
zilog->zl_sync = sync;
}
void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
zilog->zl_logbias = logbias;
}
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
zilog->zl_header = zh_phys;
zilog->zl_os = os;
zilog->zl_spa = dmu_objset_spa(os);
zilog->zl_dmu_pool = dmu_objset_pool(os);
zilog->zl_destroy_txg = TXG_INITIAL - 1;
zilog->zl_logbias = dmu_objset_logbias(os);
zilog->zl_sync = dmu_objset_syncprop(os);
zilog->zl_dirty_max_txg = 0;
zilog->zl_last_lwb_opened = NULL;
zilog->zl_last_lwb_latency = 0;
zilog->zl_max_block_size = zil_maxblocksize;
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
for (int i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL);
}
list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
offsetof(lwb_t, lwb_node));
list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
return (zilog);
}
void
zil_free(zilog_t *zilog)
{
int i;
zilog->zl_stop_sync = 1;
ASSERT0(zilog->zl_suspend);
ASSERT0(zilog->zl_suspending);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
for (i = 0; i < TXG_SIZE; i++) {
/*
* It's possible for an itx to be generated that doesn't dirty
* a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
* callback to remove the entry. We remove those here.
*
* Also free up the ziltest itxs.
*/
if (zilog->zl_itxg[i].itxg_itxs)
zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
}
mutex_destroy(&zilog->zl_issuer_lock);
mutex_destroy(&zilog->zl_lock);
cv_destroy(&zilog->zl_cv_suspend);
kmem_free(zilog, sizeof (zilog_t));
}
/*
* Open an intent log.
*/
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
zilog_t *zilog = dmu_objset_zil(os);
ASSERT3P(zilog->zl_get_data, ==, NULL);
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
zilog->zl_get_data = get_data;
return (zilog);
}
/*
* Close an intent log.
*/
void
zil_close(zilog_t *zilog)
{
lwb_t *lwb;
uint64_t txg;
if (!dmu_objset_is_snapshot(zilog->zl_os)) {
zil_commit(zilog, 0);
} else {
ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
ASSERT0(zilog->zl_dirty_max_txg);
ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
}
mutex_enter(&zilog->zl_lock);
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL)
txg = zilog->zl_dirty_max_txg;
else
txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg);
mutex_exit(&zilog->zl_lock);
/*
* We need to use txg_wait_synced() to wait long enough for the
* ZIL to be clean, and to wait for all pending lwbs to be
* written out.
*/
if (txg != 0)
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (zilog_is_dirty(zilog))
zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
(u_longlong_t)txg);
if (txg < spa_freeze_txg(zilog->zl_spa))
VERIFY(!zilog_is_dirty(zilog));
zilog->zl_get_data = NULL;
/*
* We should have only one lwb left on the list; remove it now.
*/
mutex_enter(&zilog->zl_lock);
lwb = list_head(&zilog->zl_lwb_list);
if (lwb != NULL) {
ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list));
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
if (lwb->lwb_fastwrite)
metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
list_remove(&zilog->zl_lwb_list, lwb);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zil_free_lwb(zilog, lwb);
}
mutex_exit(&zilog->zl_lock);
}
static char *suspend_tag = "zil suspending";
/*
* Suspend an intent log. While in suspended mode, we still honor
* synchronous semantics, but we rely on txg_wait_synced() to do it.
* On old version pools, we suspend the log briefly when taking a
* snapshot so that it will have an empty intent log.
*
* Long holds are not really intended to be used the way we do here --
* held for such a short time. A concurrent caller of dsl_dataset_long_held()
* could fail. Therefore we take pains to only put a long hold if it is
* actually necessary. Fortunately, it will only be necessary if the
* objset is currently mounted (or the ZVOL equivalent). In that case it
* will already have a long hold, so we are not really making things any worse.
*
* Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
* zvol_state_t), and use their mechanism to prevent their hold from being
* dropped (e.g. VFS_HOLD()). However, that would be even more pain for
* very little gain.
*
* if cookiep == NULL, this does both the suspend & resume.
* Otherwise, it returns with the dataset "long held", and the cookie
* should be passed into zil_resume().
*/
int
zil_suspend(const char *osname, void **cookiep)
{
objset_t *os;
zilog_t *zilog;
const zil_header_t *zh;
int error;
error = dmu_objset_hold(osname, suspend_tag, &os);
if (error != 0)
return (error);
zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
zh = zilog->zl_header;
if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (SET_ERROR(EBUSY));
}
/*
* Don't put a long hold in the cases where we can avoid it. This
* is when there is no cookie so we are doing a suspend & resume
* (i.e. called from zil_vdev_offline()), and there's nothing to do
* for the suspend because it's already suspended, or there's no ZIL.
*/
if (cookiep == NULL && !zilog->zl_suspending &&
(zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (0);
}
dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
zilog->zl_suspend++;
if (zilog->zl_suspend > 1) {
/*
* Someone else is already suspending it.
* Just wait for them to finish.
*/
while (zilog->zl_suspending)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
/*
* If there is no pointer to an on-disk block, this ZIL must not
* be active (e.g. filesystem not mounted), so there's nothing
* to clean up.
*/
if (BP_IS_HOLE(&zh->zh_log)) {
ASSERT(cookiep != NULL); /* fast path already handled */
*cookiep = os;
mutex_exit(&zilog->zl_lock);
return (0);
}
/*
* The ZIL has work to do. Ensure that the associated encryption
* key will remain mapped while we are committing the log by
* grabbing a reference to it. If the key isn't loaded we have no
* choice but to return an error until the wrapping key is loaded.
*/
if (os->os_encrypted &&
dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) {
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
return (SET_ERROR(EACCES));
}
zilog->zl_suspending = B_TRUE;
mutex_exit(&zilog->zl_lock);
/*
* We need to use zil_commit_impl to ensure we wait for all
* LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed
* to disk before proceeding. If we used zil_commit instead, it
* would just call txg_wait_synced(), because zl_suspend is set.
* txg_wait_synced() doesn't wait for these lwb's to be
* LWB_STATE_FLUSH_DONE before returning.
*/
zil_commit_impl(zilog, 0);
/*
* Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we
* use txg_wait_synced() to ensure the data from the zilog has
* migrated to the main pool before calling zil_destroy().
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zil_destroy(zilog, B_FALSE);
mutex_enter(&zilog->zl_lock);
zilog->zl_suspending = B_FALSE;
cv_broadcast(&zilog->zl_cv_suspend);
mutex_exit(&zilog->zl_lock);
if (os->os_encrypted)
dsl_dataset_remove_key_mapping(dmu_objset_ds(os));
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
void
zil_resume(void *cookie)
{
objset_t *os = cookie;
zilog_t *zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_suspend != 0);
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}
typedef struct zil_replay_arg {
zil_replay_func_t **zr_replay;
void *zr_arg;
boolean_t zr_byteswap;
char *zr_lr;
} zil_replay_arg_t;
static int
zil_replay_error(zilog_t *zilog, const lr_t *lr, int error)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog->zl_replaying_seq--; /* didn't actually replay this one */
dmu_objset_name(zilog->zl_os, name);
cmn_err(CE_WARN, "ZFS replay transaction error %d, "
"dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
(u_longlong_t)lr->lrc_seq,
(u_longlong_t)(lr->lrc_txtype & ~TX_CI),
(lr->lrc_txtype & TX_CI) ? "CI" : "");
return (error);
}
static int
zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
uint64_t claim_txg)
{
zil_replay_arg_t *zr = zra;
const zil_header_t *zh = zilog->zl_header;
uint64_t reclen = lr->lrc_reclen;
uint64_t txtype = lr->lrc_txtype;
int error = 0;
zilog->zl_replaying_seq = lr->lrc_seq;
if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
return (0);
if (lr->lrc_txg < claim_txg) /* already committed */
return (0);
/* Strip case-insensitive bit, still present in log record */
txtype &= ~TX_CI;
if (txtype == 0 || txtype >= TX_MAX_TYPE)
return (zil_replay_error(zilog, lr, EINVAL));
/*
* If this record type can be logged out of order, the object
* (lr_foid) may no longer exist. That's legitimate, not an error.
*/
if (TX_OOO(txtype)) {
error = dmu_object_info(zilog->zl_os,
LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
if (error == ENOENT || error == EEXIST)
return (0);
}
/*
* Make a copy of the data so we can revise and extend it.
*/
bcopy(lr, zr->zr_lr, reclen);
/*
* If this is a TX_WRITE with a blkptr, suck in the data.
*/
if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
error = zil_read_log_data(zilog, (lr_write_t *)lr,
zr->zr_lr + reclen);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
/*
* The log block containing this lr may have been byteswapped
* so that we can easily examine common fields like lrc_txtype.
* However, the log is a mix of different record types, and only the
* replay vectors know how to byteswap their records. Therefore, if
* the lr was byteswapped, undo it before invoking the replay vector.
*/
if (zr->zr_byteswap)
byteswap_uint64_array(zr->zr_lr, reclen);
/*
* We must now do two things atomically: replay this log record,
* and update the log header sequence number to reflect the fact that
* we did so. At the end of each replay function the sequence number
* is updated if we are in replay mode.
*/
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
if (error != 0) {
/*
* The DMU's dnode layer doesn't see removes until the txg
* commits, so a subsequent claim can spuriously fail with
* EEXIST. So if we receive any error we try syncing out
* any removes then retry the transaction. Note that we
* specify B_FALSE for byteswap now, so we don't do it twice.
*/
txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
return (0);
}
/* ARGSUSED */
static int
zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
{
zilog->zl_replay_blks++;
return (0);
}
/*
* If this dataset has a non-empty intent log, replay it and destroy it.
*/
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
zilog_t *zilog = dmu_objset_zil(os);
const zil_header_t *zh = zilog->zl_header;
zil_replay_arg_t zr;
if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
zil_destroy(zilog, B_TRUE);
return;
}
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
/*
* Wait for in-progress removes to sync before starting replay.
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zilog->zl_replay = B_TRUE;
zilog->zl_replay_time = ddi_get_lbolt();
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg, B_TRUE);
vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_replay = B_FALSE;
}
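/*
 * Report whether the caller should skip logging because the operation is
 * being replayed (or because synchronous logging is disabled).  While
 * replaying, remember the sequence number of the record being replayed so
 * that zil_sync() can persist it in the log header once the txg syncs.
 */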
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return (B_TRUE);
if (zilog->zl_replay) {
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
zilog->zl_replaying_seq;
return (B_TRUE);
}
return (B_FALSE);
}
/* ARGSUSED */
int
zil_reset(const char *osname, void *arg)
{
int error;
error = zil_suspend(osname, NULL);
/* EACCES means crypto key not loaded */
if ((error == EACCES) || (error == EBUSY))
return (SET_ERROR(error));
if (error != 0)
return (SET_ERROR(EEXIST));
return (0);
}
EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_lwb_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, INT, ZMOD_RW,
"ZIL block open timeout percentage");
ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
"Disable intent logging replay");
ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
"Disable ZIL cache flushes");
ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, ULONG, ZMOD_RW,
"Limit in bytes slog sync writes per commit");
ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, INT, ZMOD_RW,
"Limit in bytes of ZIL log block size");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/scripts/zfs-tests.sh b/sys/contrib/openzfs/scripts/zfs-tests.sh
index edb9c9f106c2..ac28788582f9 100755
--- a/sys/contrib/openzfs/scripts/zfs-tests.sh
+++ b/sys/contrib/openzfs/scripts/zfs-tests.sh
@@ -1,705 +1,716 @@
#!/bin/sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, Version 1.0 only
# (the "License"). You may not use this file except in compliance
# with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
BASE_DIR=$(dirname "$0")
SCRIPT_COMMON=common.sh
if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
. "${BASE_DIR}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zfs-tests.sh
VERBOSE="no"
QUIET=""
CLEANUP="yes"
CLEANUPALL="no"
LOOPBACK="yes"
STACK_TRACER="no"
FILESIZE="4G"
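# By default run the common tests plus the platform-specific set, e.g.
# "common.run,linux.run" on Linux or "common.run,freebsd.run" on FreeBSD.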
DEFAULT_RUNFILES="common.run,$(uname | tr '[:upper:]' '[:lower:]').run"
RUNFILES=${RUNFILES:-$DEFAULT_RUNFILES}
FILEDIR=${FILEDIR:-/var/tmp}
DISKS=${DISKS:-""}
SINGLETEST=""
SINGLETESTUSER="root"
TAGS=""
ITERATIONS=1
ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh"
ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh"
UNAME=$(uname -s)
# Override some defaults if on FreeBSD
if [ "$UNAME" = "FreeBSD" ] ; then
TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DMESG"}
LOSETUP=/sbin/mdconfig
DMSETUP=/sbin/gpart
else
ZFS_MMP="$STF_SUITE/callbacks/zfs_mmp.ksh"
TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DBGMSG:$ZFS_DMESG:$ZFS_MMP"}
LOSETUP=${LOSETUP:-/sbin/losetup}
DMSETUP=${DMSETUP:-/sbin/dmsetup}
fi
#
# Log an informational message when additional verbosity is enabled.
#
msg() {
if [ "$VERBOSE" = "yes" ]; then
echo "$@"
fi
}
#
# Log a failure message, cleanup, and return an error.
#
fail() {
echo "$PROG: $1" >&2
cleanup
exit 1
}
cleanup_freebsd_loopback() {
for TEST_LOOPBACK in ${LOOPBACKS}; do
if [ -c "/dev/${TEST_LOOPBACK}" ]; then
sudo "${LOSETUP}" -d -u "${TEST_LOOPBACK}" ||
echo "Failed to destroy: ${TEST_LOOPBACK}"
fi
done
}
cleanup_linux_loopback() {
for TEST_LOOPBACK in ${LOOPBACKS}; do
LOOP_DEV=$(basename "$TEST_LOOPBACK")
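# Remove any dm device layered on top of the loop device first,
# otherwise the loop device cannot be cleanly detached.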
DM_DEV=$(sudo "${DMSETUP}" ls 2>/dev/null | \
grep "${LOOP_DEV}" | cut -f1)
if [ -n "$DM_DEV" ]; then
sudo "${DMSETUP}" remove "${DM_DEV}" ||
echo "Failed to remove: ${DM_DEV}"
fi
if [ -n "${TEST_LOOPBACK}" ]; then
sudo "${LOSETUP}" -d "${TEST_LOOPBACK}" ||
echo "Failed to remove: ${TEST_LOOPBACK}"
fi
done
}
#
# Attempt to remove loopback devices and files which were created earlier
# by this script to run the test framework. The '-k' option may be passed
# to the script to suppress cleanup for debugging purposes.
#
cleanup() {
if [ "$CLEANUP" = "no" ]; then
return 0
fi
if [ "$LOOPBACK" = "yes" ]; then
if [ "$UNAME" = "FreeBSD" ] ; then
cleanup_freebsd_loopback
else
cleanup_linux_loopback
fi
fi
for TEST_FILE in ${FILES}; do
rm -f "${TEST_FILE}" >/dev/null 2>&1
done
if [ "$STF_PATH_REMOVE" = "yes" ] && [ -d "$STF_PATH" ]; then
rm -Rf "$STF_PATH"
fi
}
trap cleanup EXIT
#
# Attempt to remove all testpools (testpool.XXX), unopened dm devices,
# loopback devices, and files. This is a useful way to clean up a previous
# test run failure which has left the system in an unknown state. This can
# be dangerous and should only be used in a dedicated test environment.
#
cleanup_all() {
TEST_POOLS=$(sudo "$ZPOOL" list -H -o name | grep testpool)
if [ "$UNAME" = "FreeBSD" ] ; then
TEST_LOOPBACKS=$(sudo "${LOSETUP}" -l)
else
TEST_LOOPBACKS=$(sudo "${LOSETUP}" -a|grep file-vdev|cut -f1 -d:)
fi
TEST_FILES=$(ls /var/tmp/file-vdev* 2>/dev/null)
msg
msg "--- Cleanup ---"
msg "Removing pool(s): $(echo "${TEST_POOLS}" | tr '\n' ' ')"
for TEST_POOL in $TEST_POOLS; do
sudo "$ZPOOL" destroy "${TEST_POOL}"
done
if [ "$UNAME" != "FreeBSD" ] ; then
msg "Removing dm(s): $(sudo "${DMSETUP}" ls |
grep loop | tr '\n' ' ')"
sudo "${DMSETUP}" remove_all
fi
msg "Removing loopback(s): $(echo "${TEST_LOOPBACKS}" | tr '\n' ' ')"
for TEST_LOOPBACK in $TEST_LOOPBACKS; do
if [ "$UNAME" = "FreeBSD" ] ; then
sudo "${LOSETUP}" -d -u "${TEST_LOOPBACK}"
else
sudo "${LOSETUP}" -d "${TEST_LOOPBACK}"
fi
done
msg "Removing files(s): $(echo "${TEST_FILES}" | tr '\n' ' ')"
for TEST_FILE in $TEST_FILES; do
sudo rm -f "${TEST_FILE}"
done
}
#
# Takes a name as the only argument and looks for the following variations
# on that name. If one is found it is returned.
#
# $RUNFILE_DIR/<name>
# $RUNFILE_DIR/<name>.run
# <name>
# <name>.run
#
find_runfile() {
NAME=$1
RESULT=""
if [ -f "$RUNFILE_DIR/$NAME" ]; then
RESULT="$RUNFILE_DIR/$NAME"
elif [ -f "$RUNFILE_DIR/$NAME.run" ]; then
RESULT="$RUNFILE_DIR/$NAME.run"
elif [ -f "$NAME" ]; then
RESULT="$NAME"
elif [ -f "$NAME.run" ]; then
RESULT="$NAME.run"
fi
echo "$RESULT"
}
#
# Symlink each file that appears under any of the given paths.
#
create_links() {
dir_list="$1"
file_list="$2"
[ -n "$STF_PATH" ] || fail "STF_PATH wasn't correctly set"
for i in $file_list; do
for j in $dir_list; do
[ ! -e "$STF_PATH/$i" ] || continue
if [ ! -d "$j/$i" ] && [ -e "$j/$i" ]; then
ln -sf "$j/$i" "$STF_PATH/$i" || \
fail "Couldn't link $i"
break
fi
done
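# Anything we could not link is recorded so it can be reported
# as a missing utility.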
[ ! -e "$STF_PATH/$i" ] && \
STF_MISSING_BIN="$STF_MISSING_BIN $i"
done
STF_MISSING_BIN=${STF_MISSING_BIN# }
}
#
# Constrain the path to limit the available binaries to a known set.
# When running in-tree a top level ./bin/ directory is created for
# convenience, otherwise a temporary directory is used.
#
constrain_path() {
. "$STF_SUITE/include/commands.cfg"
# On FreeBSD, base system zfs utils are in /sbin and OpenZFS utils
# install to /usr/local/sbin. To avoid testing the wrong utils we
# need /usr/local to come before / in the path search order.
SYSTEM_DIRS="/usr/local/bin /usr/local/sbin"
SYSTEM_DIRS="$SYSTEM_DIRS /usr/bin /usr/sbin /bin /sbin $LIBEXEC_DIR"
if [ "$INTREE" = "yes" ]; then
# Constrained path set to ./zfs/bin/
STF_PATH="$BIN_DIR"
STF_PATH_REMOVE="no"
STF_MISSING_BIN=""
if [ ! -d "$STF_PATH" ]; then
mkdir "$STF_PATH"
chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH"
fi
# Special case links for standard zfs utilities
DIRS="$(find "$CMD_DIR" -type d \( ! -name .deps -a \
! -name .libs \) -print | tr '\n' ' ')"
create_links "$DIRS" "$ZFS_FILES"
# Special case links for zfs test suite utilities
DIRS="$(find "$STF_SUITE" -type d \( ! -name .deps -a \
! -name .libs \) -print | tr '\n' ' ')"
create_links "$DIRS" "$ZFSTEST_FILES"
else
# Constrained path set to /var/tmp/constrained_path.*
SYSTEMDIR=${SYSTEMDIR:-/var/tmp/constrained_path.XXXXXX}
STF_PATH=$(mktemp -d "$SYSTEMDIR")
STF_PATH_REMOVE="yes"
STF_MISSING_BIN=""
chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH"
# Special case links for standard zfs utilities
create_links "$SYSTEM_DIRS" "$ZFS_FILES"
# Special case links for zfs test suite utilities
create_links "$STF_SUITE/bin" "$ZFSTEST_FILES"
fi
# Standard system utilities
SYSTEM_FILES="$SYSTEM_FILES_COMMON"
if [ "$UNAME" = "FreeBSD" ] ; then
SYSTEM_FILES="$SYSTEM_FILES $SYSTEM_FILES_FREEBSD"
else
SYSTEM_FILES="$SYSTEM_FILES $SYSTEM_FILES_LINUX"
fi
create_links "$SYSTEM_DIRS" "$SYSTEM_FILES"
# Exceptions
ln -fs "$STF_PATH/awk" "$STF_PATH/nawk"
if [ "$UNAME" = "Linux" ] ; then
ln -fs /sbin/fsck.ext4 "$STF_PATH/fsck"
ln -fs /sbin/mkfs.ext4 "$STF_PATH/newfs"
ln -fs "$STF_PATH/gzip" "$STF_PATH/compress"
ln -fs "$STF_PATH/gunzip" "$STF_PATH/uncompress"
ln -fs "$STF_PATH/exportfs" "$STF_PATH/share"
ln -fs "$STF_PATH/exportfs" "$STF_PATH/unshare"
elif [ "$UNAME" = "FreeBSD" ] ; then
ln -fs /usr/local/bin/ksh93 "$STF_PATH/ksh"
fi
}
#
# Output a useful usage message.
#
usage() {
cat << EOF
USAGE:
$0 [-hvqxkfSc] [-n NFSFILE] [-I NUM] [-d DIR] [-s SIZE] [-r RUNFILES] [-t PATH] [-T TAGS] [-u USER]
DESCRIPTION:
ZFS Test Suite launch script
OPTIONS:
-h Show this message
-v Verbose zfs-tests.sh output
-q Quiet test-runner output
-x Remove all testpools, dm, lo, and files (unsafe)
-k Disable cleanup after test failure
-f Use files only, disables block device tests
-S Enable stack tracer (negative performance impact)
-c Only create and populate constrained path
-n NFSFILE Use the nfsfile to determine the NFS configuration
-I NUM Number of iterations
-d DIR Use DIR for files and loopback devices
-s SIZE Use vdevs of SIZE (default: 4G)
-r RUNFILES Run tests in RUNFILES (default: ${DEFAULT_RUNFILES})
-t PATH Run single test at PATH relative to test suite
-T TAGS Comma separated list of tags (default: 'functional')
-u USER Run single test as USER (default: root)
EXAMPLES:
# Run the default (linux) suite of tests and output the configuration used.
$0 -v
# Run a smaller suite of tests designed to run more quickly.
$0 -r linux-fast
# Run a single test
$0 -t tests/functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh
# Clean up from a previous run of the test suite prior to testing, then run
# the default (linux) suite of tests.
$0 -x
EOF
}
while getopts 'hvqxkfScn:d:s:r:?t:T:u:I:' OPTION; do
case $OPTION in
h)
usage
exit 1
;;
v)
VERBOSE="yes"
;;
q)
QUIET="yes"
;;
x)
CLEANUPALL="yes"
;;
k)
CLEANUP="no"
;;
f)
LOOPBACK="no"
;;
S)
STACK_TRACER="yes"
;;
c)
constrain_path
exit
;;
n)
nfsfile=$OPTARG
[ -f "$nfsfile" ] || fail "Cannot read file: $nfsfile"
export NFS=1
. "$nfsfile"
;;
d)
FILEDIR="$OPTARG"
;;
I)
ITERATIONS="$OPTARG"
if [ "$ITERATIONS" -le 0 ]; then
fail "Iterations must be greater than 0."
fi
;;
s)
FILESIZE="$OPTARG"
;;
r)
RUNFILES="$OPTARG"
;;
t)
if [ -n "$SINGLETEST" ]; then
fail "-t can only be provided once."
fi
SINGLETEST="$OPTARG"
;;
T)
TAGS="$OPTARG"
;;
u)
SINGLETESTUSER="$OPTARG"
;;
?)
usage
exit
;;
esac
done
shift $((OPTIND-1))
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"}
LOOPBACKS=${LOOPBACKS:-""}
if [ -n "$SINGLETEST" ]; then
if [ -n "$TAGS" ]; then
fail "-t and -T are mutually exclusive."
fi
RUNFILE_DIR="/var/tmp"
RUNFILES="zfs-tests.$$.run"
SINGLEQUIET="False"
if [ -n "$QUIET" ]; then
SINGLEQUIET="True"
fi
cat >$RUNFILE_DIR/$RUNFILES << EOF
[DEFAULT]
pre =
quiet = $SINGLEQUIET
pre_user = root
user = $SINGLETESTUSER
timeout = 600
post_user = root
post =
outputdir = /var/tmp/test_results
EOF
SINGLETESTDIR=$(dirname "$SINGLETEST")
SINGLETESTFILE=$(basename "$SINGLETEST")
SETUPSCRIPT=
CLEANUPSCRIPT=
if [ -f "$STF_SUITE/$SINGLETESTDIR/setup.ksh" ]; then
SETUPSCRIPT="setup"
fi
if [ -f "$STF_SUITE/$SINGLETESTDIR/cleanup.ksh" ]; then
CLEANUPSCRIPT="cleanup"
fi
cat >>$RUNFILE_DIR/$RUNFILES << EOF
[$SINGLETESTDIR]
tests = ['$SINGLETESTFILE']
pre = $SETUPSCRIPT
post = $CLEANUPSCRIPT
tags = ['functional']
EOF
fi
#
# Use default tag if none was specified
#
TAGS=${TAGS:='functional'}
#
# Attempt to locate the runfiles describing the test workload.
#
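# For example (illustrative), "-r sanity,linux" expands each name via
# find_runfile and joins the resulting paths with commas.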
R=""
IFS=,
for RUNFILE in $RUNFILES; do
if [ -n "$RUNFILE" ]; then
SAVED_RUNFILE="$RUNFILE"
RUNFILE=$(find_runfile "$RUNFILE")
[ -z "$RUNFILE" ] && fail "Cannot find runfile: $SAVED_RUNFILE"
R="$R,$RUNFILE"
fi
if [ ! -r "$RUNFILE" ]; then
fail "Cannot read runfile: $RUNFILE"
fi
done
unset IFS
RUNFILES=${R#,}
#
# This script should not be run as root. Instead, the test user, which may
# be a normal user account, needs to be configured such that it can
# run commands via sudo passwordlessly.
#
if [ "$(id -u)" = "0" ]; then
fail "This script must not be run as root."
fi
if [ "$(sudo whoami)" != "root" ]; then
fail "Passwordless sudo access required."
fi
#
# Constrain the available binaries to a known set.
#
constrain_path
#
# Check if ksh exists
#
if [ "$UNAME" = "FreeBSD" ]; then
sudo ln -fs /usr/local/bin/ksh93 /bin/ksh
fi
[ -e "$STF_PATH/ksh" ] || fail "This test suite requires ksh."
[ -e "$STF_SUITE/include/default.cfg" ] || fail \
"Missing $STF_SUITE/include/default.cfg file."
#
# Verify the ZFS module stack is loaded.
#
if [ "$STACK_TRACER" = "yes" ]; then
sudo "${ZFS_SH}" -S >/dev/null 2>&1
else
sudo "${ZFS_SH}" >/dev/null 2>&1
fi
#
# Attempt to cleanup all previous state for a new test run.
#
if [ "$CLEANUPALL" = "yes" ]; then
cleanup_all
fi
#
# By default, preserve any existing pools.
# NOTE: Since 'zpool list' outputs a newline-delimited list, convert $KEEP
# from space-delimited to newline-delimited.
#
if [ -z "${KEEP}" ]; then
KEEP="$(sudo "$ZPOOL" list -H -o name)"
if [ -z "${KEEP}" ]; then
KEEP="rpool"
fi
else
KEEP="$(echo "$KEEP" | tr '[:blank:]' '\n')"
fi
#
# NOTE: The following environment variables are undocumented
# and should be used for testing purposes only:
#
# __ZFS_POOL_EXCLUDE - don't iterate over the pools it lists
# __ZFS_POOL_RESTRICT - iterate only over the pools it lists
#
# See libzfs/libzfs_config.c for more information.
#
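# As an illustration, if KEEP contains "rpool" and "bootpool" on separate
# lines, the resulting __ZFS_POOL_EXCLUDE below is "rpool bootpool".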
if [ "$UNAME" = "FreeBSD" ] ; then
__ZFS_POOL_EXCLUDE="$(echo "$KEEP" | tr -s '\n' ' ')"
else
__ZFS_POOL_EXCLUDE="$(echo "$KEEP" | sed ':a;N;s/\n/ /g;ba')"
fi
. "$STF_SUITE/include/default.cfg"
-msg
-msg "--- Configuration ---"
-msg "Runfiles: $RUNFILES"
-msg "STF_TOOLS: $STF_TOOLS"
-msg "STF_SUITE: $STF_SUITE"
-msg "STF_PATH: $STF_PATH"
-
#
# No DISKS have been provided, so basic file or loopback based devices
# must be created for the test suite to use.
#
if [ -z "${DISKS}" ]; then
+ #
+ # If this is a performance run, prevent accidental use of
+ # loopback devices.
+ #
+ [ "$TAGS" = "perf" ] && fail "Running perf tests without disks."
+
#
# Create sparse files for the test suite. These may be used
# directly or have loopback devices layered on them.
#
for TEST_FILE in ${FILES}; do
[ -f "$TEST_FILE" ] && fail "Failed file exists: ${TEST_FILE}"
truncate -s "${FILESIZE}" "${TEST_FILE}" ||
fail "Failed creating: ${TEST_FILE} ($?)"
done
#
# If requested, set up loopback devices backed by the sparse files.
#
if [ "$LOOPBACK" = "yes" ]; then
test -x "$LOSETUP" || fail "$LOSETUP utility must be installed"
for TEST_FILE in ${FILES}; do
if [ "$UNAME" = "FreeBSD" ] ; then
MDDEVICE=$(sudo "${LOSETUP}" -a -t vnode -f "${TEST_FILE}")
if [ -z "$MDDEVICE" ] ; then
fail "Failed: ${TEST_FILE} -> loopback"
fi
DISKS="$DISKS $MDDEVICE"
LOOPBACKS="$LOOPBACKS $MDDEVICE"
else
TEST_LOOPBACK=$(sudo "${LOSETUP}" -f)
sudo "${LOSETUP}" "${TEST_LOOPBACK}" "${TEST_FILE}" ||
fail "Failed: ${TEST_FILE} -> ${TEST_LOOPBACK}"
BASELOOPBACK=$(basename "$TEST_LOOPBACK")
DISKS="$DISKS $BASELOOPBACK"
LOOPBACKS="$LOOPBACKS $TEST_LOOPBACK"
fi
done
DISKS=${DISKS# }
LOOPBACKS=${LOOPBACKS# }
else
DISKS="$FILES"
fi
fi
+#
+# It may be desirable to test with fewer disks than the default when running
+# the performance tests, but the functional tests require at least three.
+#
NUM_DISKS=$(echo "${DISKS}" | awk '{print NF}')
-[ "$NUM_DISKS" -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)"
+if [ "$TAGS" != "perf" ]; then
+ [ "$NUM_DISKS" -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)"
+fi
#
# Disable SELinux until the ZFS Test Suite has been updated accordingly.
#
if [ -x "$STF_PATH/setenforce" ]; then
sudo setenforce permissive >/dev/null 2>&1
fi
#
# Enable internal ZFS debug log and clear it.
#
if [ -e /sys/module/zfs/parameters/zfs_dbgmsg_enable ]; then
sudo /bin/sh -c "echo 1 >/sys/module/zfs/parameters/zfs_dbgmsg_enable"
sudo /bin/sh -c "echo 0 >/proc/spl/kstat/zfs/dbgmsg"
fi
+msg
+msg "--- Configuration ---"
+msg "Runfiles: $RUNFILES"
+msg "STF_TOOLS: $STF_TOOLS"
+msg "STF_SUITE: $STF_SUITE"
+msg "STF_PATH: $STF_PATH"
msg "FILEDIR: $FILEDIR"
msg "FILES: $FILES"
msg "LOOPBACKS: $LOOPBACKS"
msg "DISKS: $DISKS"
msg "NUM_DISKS: $NUM_DISKS"
msg "FILESIZE: $FILESIZE"
msg "ITERATIONS: $ITERATIONS"
msg "TAGS: $TAGS"
msg "STACK_TRACER: $STACK_TRACER"
msg "Keep pool(s): $KEEP"
msg "Missing util(s): $STF_MISSING_BIN"
msg ""
export STF_TOOLS
export STF_SUITE
export STF_PATH
export DISKS
export FILEDIR
export KEEP
export __ZFS_POOL_EXCLUDE
export TESTFAIL_CALLBACKS
export PATH=$STF_PATH
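# The mktemp invocations below differ because BSD and GNU mktemp handle the
# -t/-p options differently; on FreeBSD the full template path under FILEDIR
# is therefore passed directly.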
if [ "$UNAME" = "FreeBSD" ] ; then
mkdir -p "$FILEDIR" || true
RESULTS_FILE=$(mktemp -u "${FILEDIR}/zts-results.XXXXXX")
REPORT_FILE=$(mktemp -u "${FILEDIR}/zts-report.XXXXXX")
else
RESULTS_FILE=$(mktemp -u -t zts-results.XXXXXX -p "$FILEDIR")
REPORT_FILE=$(mktemp -u -t zts-report.XXXXXX -p "$FILEDIR")
fi
#
# Run all the tests as specified.
#
msg "${TEST_RUNNER} ${QUIET:+-q}" \
"-c \"${RUNFILES}\"" \
"-T \"${TAGS}\"" \
"-i \"${STF_SUITE}\"" \
"-I \"${ITERATIONS}\""
${TEST_RUNNER} ${QUIET:+-q} \
-c "${RUNFILES}" \
-T "${TAGS}" \
-i "${STF_SUITE}" \
-I "${ITERATIONS}" \
2>&1 | tee "$RESULTS_FILE"
#
# Analyze the results.
#
${ZTS_REPORT} "$RESULTS_FILE" >"$REPORT_FILE"
RESULT=$?
cat "$REPORT_FILE"
RESULTS_DIR=$(awk '/^Log directory/ { print $3 }' "$RESULTS_FILE")
if [ -d "$RESULTS_DIR" ]; then
cat "$RESULTS_FILE" "$REPORT_FILE" >"$RESULTS_DIR/results"
fi
rm -f "$RESULTS_FILE" "$REPORT_FILE"
if [ -n "$SINGLETEST" ]; then
rm -f "$RUNFILES" >/dev/null 2>&1
fi
exit ${RESULT}
diff --git a/sys/contrib/openzfs/tests/runfiles/common.run b/sys/contrib/openzfs/tests/runfiles/common.run
index 996e5f615cd4..536788f2eeed 100644
--- a/sys/contrib/openzfs/tests/runfiles/common.run
+++ b/sys/contrib/openzfs/tests/runfiles/common.run
@@ -1,944 +1,943 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# This run file contains all of the common functional tests. When
# adding a new test, consider also adding it to the sanity.run file
# if the new test runs to completion in only a few seconds.
#
# Approximate run time: 4-5 hours
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/off]
tests = ['posixmode']
tags = ['functional', 'acl']
[tests/functional/alloc_class]
tests = ['alloc_class_001_pos', 'alloc_class_002_neg', 'alloc_class_003_pos',
'alloc_class_004_pos', 'alloc_class_005_pos', 'alloc_class_006_pos',
'alloc_class_007_pos', 'alloc_class_008_pos', 'alloc_class_009_pos',
'alloc_class_010_pos', 'alloc_class_011_neg', 'alloc_class_012_pos',
'alloc_class_013_pos']
tags = ['functional', 'alloc_class']
[tests/functional/arc]
tests = ['dbufstats_001_pos', 'dbufstats_002_pos', 'dbufstats_003_pos',
'arcstats_runtime_tuning']
tags = ['functional', 'arc']
[tests/functional/atime]
tests = ['atime_001_pos', 'atime_002_neg', 'root_atime_off', 'root_atime_on']
tags = ['functional', 'atime']
[tests/functional/bootfs]
tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos',
'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos',
'bootfs_008_pos']
tags = ['functional', 'bootfs']
[tests/functional/btree]
tests = ['btree_positive', 'btree_negative']
tags = ['functional', 'btree']
pre =
post =
[tests/functional/cache]
tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg',
'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg',
'cache_009_pos', 'cache_010_pos', 'cache_011_pos', 'cache_012_pos']
tags = ['functional', 'cache']
[tests/functional/cachefile]
tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
'cachefile_004_pos']
tags = ['functional', 'cachefile']
[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure',
'sensitive_none_lookup', 'sensitive_none_delete',
'sensitive_formd_lookup', 'sensitive_formd_delete',
'insensitive_none_lookup', 'insensitive_none_delete',
'insensitive_formd_lookup', 'insensitive_formd_delete',
'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
tags = ['functional', 'casenorm']
[tests/functional/channel_program/lua_core]
tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg',
'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries',
'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
'tst.return_recursive_table', 'tst.stack_gsub', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']
[tests/functional/channel_program/synctask_core]
tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit',
'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg',
'tst.get_number_props', 'tst.get_string_props', 'tst.get_type',
'tst.get_userquota', 'tst.get_written', 'tst.inherit', 'tst.list_bookmarks',
'tst.list_children', 'tst.list_clones', 'tst.list_holds',
'tst.list_snapshots', 'tst.list_system_props',
'tst.list_user_props', 'tst.parse_args_neg','tst.promote_conflict',
'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult',
'tst.rollback_one', 'tst.set_props', 'tst.snapshot_destroy', 'tst.snapshot_neg',
'tst.snapshot_recursive', 'tst.snapshot_simple',
'tst.bookmark.create', 'tst.bookmark.copy',
'tst.terminate_by_signal'
]
tags = ['functional', 'channel_program', 'synctask_core']
[tests/functional/checksum]
tests = ['run_sha2_test', 'run_skein_test', 'filetest_001_pos',
'filetest_002_pos']
tags = ['functional', 'checksum']
[tests/functional/clean_mirror]
tests = [ 'clean_mirror_001_pos', 'clean_mirror_002_pos',
'clean_mirror_003_pos', 'clean_mirror_004_pos']
tags = ['functional', 'clean_mirror']
[tests/functional/cli_root/zdb]
tests = ['zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos',
'zdb_006_pos', 'zdb_args_neg', 'zdb_args_pos',
'zdb_block_size_histogram', 'zdb_checksum', 'zdb_decompress',
'zdb_display_block', 'zdb_object_range_neg', 'zdb_object_range_pos',
'zdb_objset_id', 'zdb_decompress_zstd', 'zdb_recover', 'zdb_recover_2']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_bookmark]
tests = ['zfs_bookmark_cliargs']
tags = ['functional', 'cli_root', 'zfs_bookmark']
[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
'zfs_change-key_pbkdf2iters', 'zfs_change-key_clones']
tags = ['functional', 'cli_root', 'zfs_change-key']
[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
'zfs_clone_010_pos', 'zfs_clone_encrypted', 'zfs_clone_deeply_nested']
tags = ['functional', 'cli_root', 'zfs_clone']
[tests/functional/cli_root/zfs_copies]
tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
tags = ['functional', 'cli_root', 'zfs_copies']
[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg',
'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos',
'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
'zfs_create_crypt_combos', 'zfs_create_dryrun', 'zfs_create_nomount',
'zfs_create_verbose']
tags = ['functional', 'cli_root', 'zfs_create']
[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_clone_livelist_condense_and_disable',
'zfs_clone_livelist_condense_races', 'zfs_clone_livelist_dedup',
'zfs_destroy_001_pos', 'zfs_destroy_002_pos', 'zfs_destroy_003_pos',
'zfs_destroy_004_pos', 'zfs_destroy_005_neg', 'zfs_destroy_006_neg',
'zfs_destroy_007_neg', 'zfs_destroy_008_pos', 'zfs_destroy_009_pos',
'zfs_destroy_010_pos', 'zfs_destroy_011_pos', 'zfs_destroy_012_pos',
'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
'zfs_destroy_016_pos', 'zfs_destroy_clone_livelist',
'zfs_destroy_dev_removal', 'zfs_destroy_dev_removal_condense']
tags = ['functional', 'cli_root', 'zfs_destroy']
[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_changes', 'zfs_diff_cliargs', 'zfs_diff_timestamp',
'zfs_diff_types', 'zfs_diff_encrypted']
tags = ['functional', 'cli_root', 'zfs_diff']
[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']
[tests/functional/cli_root/zfs_ids_to_path]
tests = ['zfs_ids_to_path_001_pos']
tags = ['functional', 'cli_root', 'zfs_ids_to_path']
[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos',
'zfs_inherit_mountpoint']
tags = ['functional', 'cli_root', 'zfs_inherit']
[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
'zfs_load-key_https', 'zfs_load-key_location', 'zfs_load-key_noop',
'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg',
'zfs_mount_012_pos', 'zfs_mount_all_001_pos', 'zfs_mount_encrypted',
'zfs_mount_remount', 'zfs_mount_all_fail', 'zfs_mount_all_mountpoints',
'zfs_mount_test_race']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_program]
tests = ['zfs_program_json']
tags = ['functional', 'cli_root', 'zfs_program']
[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot']
tags = ['functional', 'cli_root', 'zfs_promote']
[tests/functional/cli_root/zfs_property]
tests = ['zfs_written_property_001_pos']
tags = ['functional', 'cli_root', 'zfs_property']
[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos',
'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
'zfs_receive_016_pos', 'receive-o-x_props_override',
'zfs_receive_from_encrypted', 'zfs_receive_to_encrypted',
'zfs_receive_raw', 'zfs_receive_raw_incremental', 'zfs_receive_-e',
'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props']
tags = ['functional', 'cli_root', 'zfs_receive']
[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos',
'zfs_rename_004_neg', 'zfs_rename_005_neg', 'zfs_rename_006_pos',
'zfs_rename_007_pos', 'zfs_rename_008_pos', 'zfs_rename_009_neg',
'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg',
'zfs_rename_013_pos', 'zfs_rename_014_neg', 'zfs_rename_encrypted_child',
'zfs_rename_to_encrypted', 'zfs_rename_mountpoint', 'zfs_rename_nounmount']
tags = ['functional', 'cli_root', 'zfs_rename']
[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
tags = ['functional', 'cli_root', 'zfs_reservation']
[tests/functional/cli_root/zfs_rollback]
tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos',
'zfs_rollback_003_neg', 'zfs_rollback_004_neg']
tags = ['functional', 'cli_root', 'zfs_rollback']
[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos',
'zfs_send_007_pos', 'zfs_send_encrypted', 'zfs_send_raw',
'zfs_send_sparse', 'zfs_send-b', 'zfs_send_skip_missing']
tags = ['functional', 'cli_root', 'zfs_send']
[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos',
'mountpoint_002_pos', 'reservation_001_neg', 'user_property_002_pos',
'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos',
'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos',
'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg',
'zfs_set_002_neg', 'zfs_set_003_neg', 'property_alias_001_pos',
'mountpoint_003_pos', 'ro_props_001_pos', 'zfs_set_keylocation',
'zfs_set_feature_activation']
tags = ['functional', 'cli_root', 'zfs_set']
[tests/functional/cli_root/zfs_share]
tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos',
'zfs_share_004_pos', 'zfs_share_006_pos', 'zfs_share_008_neg',
'zfs_share_010_neg', 'zfs_share_011_pos', 'zfs_share_concurrent_shares']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_snapshot]
tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg',
'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg',
'zfs_snapshot_009_pos']
tags = ['functional', 'cli_root', 'zfs_snapshot']
[tests/functional/cli_root/zfs_unload-key]
tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive']
tags = ['functional', 'cli_root', 'zfs_unload-key']
[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos',
'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos',
'zfs_unmount_all_001_pos', 'zfs_unmount_nested', 'zfs_unmount_unload_keys']
tags = ['functional', 'cli_root', 'zfs_unmount']
[tests/functional/cli_root/zfs_unshare]
tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos',
'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos',
'zfs_unshare_007_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']
[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos',
'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg',
'zfs_upgrade_007_neg']
tags = ['functional', 'cli_root', 'zfs_upgrade']
[tests/functional/cli_root/zfs_wait]
tests = ['zfs_wait_deleteq']
tags = ['functional', 'cli_root', 'zfs_wait']
[tests/functional/cli_root/zpool]
tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos', 'zpool_colors']
tags = ['functional', 'cli_root', 'zpool']
[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_010_pos',
'add-o_ashift', 'add_prop_ashift', 'zpool_add_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
tests = ['zpool_attach_001_neg', 'attach-o_ashift']
tags = ['functional', 'cli_root', 'zpool_attach']
[tests/functional/cli_root/zpool_clear]
tests = ['zpool_clear_001_pos', 'zpool_clear_002_neg', 'zpool_clear_003_neg',
'zpool_clear_readonly']
tags = ['functional', 'cli_root', 'zpool_clear']
[tests/functional/cli_root/zpool_create]
tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_005_pos',
'zpool_create_006_pos', 'zpool_create_007_neg', 'zpool_create_008_pos',
'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_011_neg',
'zpool_create_012_neg', 'zpool_create_014_neg', 'zpool_create_015_neg',
'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos',
'zpool_create_020_pos', 'zpool_create_021_pos', 'zpool_create_022_pos',
'zpool_create_023_neg', 'zpool_create_024_pos',
'zpool_create_encrypted', 'zpool_create_crypt_combos',
'zpool_create_draid_001_pos', 'zpool_create_draid_002_pos',
'zpool_create_draid_003_pos', 'zpool_create_draid_004_pos',
'zpool_create_features_001_pos', 'zpool_create_features_002_pos',
'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
'zpool_create_features_005_pos', 'zpool_create_features_006_pos',
'zpool_create_features_007_pos', 'zpool_create_features_008_pos',
'zpool_create_features_009_pos', 'create-o_ashift',
'zpool_create_tempname', 'zpool_create_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_create']
[tests/functional/cli_root/zpool_destroy]
tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos',
'zpool_destroy_003_neg']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_destroy']
[tests/functional/cli_root/zpool_detach]
tests = ['zpool_detach_001_neg']
tags = ['functional', 'cli_root', 'zpool_detach']
[tests/functional/cli_root/zpool_events]
tests = ['zpool_events_clear', 'zpool_events_cliargs', 'zpool_events_follow',
'zpool_events_poolname', 'zpool_events_errors', 'zpool_events_duplicates',
'zpool_events_clear_retained']
tags = ['functional', 'cli_root', 'zpool_events']
[tests/functional/cli_root/zpool_export]
tests = ['zpool_export_001_pos', 'zpool_export_002_pos',
'zpool_export_003_neg', 'zpool_export_004_pos']
tags = ['functional', 'cli_root', 'zpool_export']
[tests/functional/cli_root/zpool_get]
tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos',
'zpool_get_004_neg', 'zpool_get_005_pos']
tags = ['functional', 'cli_root', 'zpool_get']
[tests/functional/cli_root/zpool_history]
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']
[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos',
'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos',
'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg',
'zpool_import_012_pos', 'zpool_import_013_neg', 'zpool_import_014_pos',
'zpool_import_015_pos', 'zpool_import_016_pos', 'zpool_import_017_pos',
'zpool_import_features_001_pos', 'zpool_import_features_002_neg',
'zpool_import_features_003_pos', 'zpool_import_missing_001_pos',
'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos',
'zpool_import_rename_001_pos', 'zpool_import_all_001_pos',
'zpool_import_encrypted', 'zpool_import_encrypted_load',
'zpool_import_errata3', 'zpool_import_errata4',
'import_cachefile_device_added',
'import_cachefile_device_removed',
'import_cachefile_device_replaced',
'import_cachefile_mirror_attached',
'import_cachefile_mirror_detached',
'import_cachefile_paths_changed',
'import_cachefile_shared_device',
'import_devices_missing',
'import_paths_changed',
'import_rewind_config_changed',
'import_rewind_device_replaced']
tags = ['functional', 'cli_root', 'zpool_import']
timeout = 1200
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
'zpool_labelclear_removed', 'zpool_labelclear_valid']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_labelclear']
[tests/functional/cli_root/zpool_initialize]
tests = ['zpool_initialize_attach_detach_add_remove',
'zpool_initialize_fault_export_import_online',
'zpool_initialize_import_export',
'zpool_initialize_offline_export_import_online',
'zpool_initialize_online_offline',
'zpool_initialize_split',
'zpool_initialize_start_and_cancel_neg',
'zpool_initialize_start_and_cancel_pos',
'zpool_initialize_suspend_resume',
'zpool_initialize_unsupported_vdevs',
'zpool_initialize_verify_checksums',
'zpool_initialize_verify_initialized']
pre =
tags = ['functional', 'cli_root', 'zpool_initialize']
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg',
'zpool_offline_003_pos']
tags = ['functional', 'cli_root', 'zpool_offline']
[tests/functional/cli_root/zpool_online]
tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tags = ['functional', 'cli_root', 'zpool_online']
[tests/functional/cli_root/zpool_remove]
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
'zpool_remove_003_pos']
tags = ['functional', 'cli_root', 'zpool_remove']
[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift']
tags = ['functional', 'cli_root', 'zpool_replace']
[tests/functional/cli_root/zpool_resilver]
tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart']
tags = ['functional', 'cli_root', 'zpool_resilver']
[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing',
'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies']
tags = ['functional', 'cli_root', 'zpool_scrub']
[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg',
'zpool_set_ashift', 'zpool_set_features']
tags = ['functional', 'cli_root', 'zpool_set']
[tests/functional/cli_root/zpool_split]
tests = ['zpool_split_cliargs', 'zpool_split_devices',
'zpool_split_encryption', 'zpool_split_props', 'zpool_split_vdevs',
'zpool_split_resilver', 'zpool_split_indirect',
'zpool_split_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos',
'zpool_status_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_status']
[tests/functional/cli_root/zpool_sync]
tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
tags = ['functional', 'cli_root', 'zpool_sync']
[tests/functional/cli_root/zpool_trim]
tests = ['zpool_trim_attach_detach_add_remove',
'zpool_trim_fault_export_import_online',
'zpool_trim_import_export', 'zpool_trim_multiple', 'zpool_trim_neg',
'zpool_trim_offline_export_import_online', 'zpool_trim_online_offline',
'zpool_trim_partial', 'zpool_trim_rate', 'zpool_trim_rate_neg',
'zpool_trim_secure', 'zpool_trim_split', 'zpool_trim_start_and_cancel_neg',
'zpool_trim_start_and_cancel_pos', 'zpool_trim_suspend_resume',
'zpool_trim_unsupported_vdevs', 'zpool_trim_verify_checksums',
'zpool_trim_verify_trimmed']
tags = ['functional', 'zpool_trim']
[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
'zpool_upgrade_009_neg', 'zpool_upgrade_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_upgrade']
[tests/functional/cli_root/zpool_wait]
tests = ['zpool_wait_discard', 'zpool_wait_freeing',
'zpool_wait_initialize_basic', 'zpool_wait_initialize_cancel',
'zpool_wait_initialize_flag', 'zpool_wait_multiple',
'zpool_wait_no_activity', 'zpool_wait_remove', 'zpool_wait_remove_cancel',
'zpool_wait_trim_basic', 'zpool_wait_trim_cancel', 'zpool_wait_trim_flag',
'zpool_wait_usage']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_root/zpool_wait/scan]
tests = ['zpool_wait_replace_cancel', 'zpool_wait_rebuild',
'zpool_wait_resilver', 'zpool_wait_scrub_cancel',
'zpool_wait_replace', 'zpool_wait_scrub_basic', 'zpool_wait_scrub_flag']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg',
'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg',
'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege']
user =
tags = ['functional', 'cli_user', 'misc']
[tests/functional/cli_user/zfs_list]
tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos',
'zfs_list_004_neg', 'zfs_list_007_pos', 'zfs_list_008_neg']
user =
tags = ['functional', 'cli_user', 'zfs_list']
[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
'zpool_iostat_005_pos', 'zpool_iostat_-c_disable',
'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_iostat']
[tests/functional/cli_user/zpool_list]
tests = ['zpool_list_001_pos', 'zpool_list_002_neg']
user =
tags = ['functional', 'cli_user', 'zpool_list']
[tests/functional/cli_user/zpool_status]
tests = ['zpool_status_003_pos', 'zpool_status_-c_disable',
'zpool_status_-c_homedir', 'zpool_status_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_status']
[tests/functional/compression]
tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos',
'l2arc_compressed_arc', 'l2arc_compressed_arc_disabled',
'l2arc_encrypted', 'l2arc_encrypted_no_compressed_arc']
tags = ['functional', 'compression']
[tests/functional/cp_files]
tests = ['cp_files_001_pos']
tags = ['functional', 'cp_files']
[tests/functional/ctime]
tests = ['ctime_001_pos' ]
tags = ['functional', 'ctime']
[tests/functional/deadman]
tests = ['deadman_ratelimit', 'deadman_sync', 'deadman_zio']
pre =
post =
tags = ['functional', 'deadman']
[tests/functional/delegate]
tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos', 'zfs_allow_003_pos',
'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos',
'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg',
'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg',
'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos',
'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos',
'zfs_unallow_007_neg', 'zfs_unallow_008_neg']
tags = ['functional', 'delegate']
[tests/functional/exec]
tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']
[tests/functional/features/async_destroy]
tests = ['async_destroy_001_pos']
tags = ['functional', 'features', 'async_destroy']
[tests/functional/features/large_dnode]
tests = ['large_dnode_001_pos', 'large_dnode_003_pos', 'large_dnode_004_neg',
'large_dnode_005_pos', 'large_dnode_007_neg', 'large_dnode_009_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/grow]
pre =
post =
tests = ['grow_pool_001_pos', 'grow_replicas_001_pos']
tags = ['functional', 'grow']
[tests/functional/history]
tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
'history_004_pos', 'history_005_neg', 'history_006_neg',
'history_007_pos', 'history_008_pos', 'history_009_pos',
'history_010_pos']
tags = ['functional', 'history']
[tests/functional/hkdf]
tests = ['run_hkdf_test']
tags = ['functional', 'hkdf']
[tests/functional/inheritance]
tests = ['inherit_001_pos']
pre =
tags = ['functional', 'inheritance']
[tests/functional/io]
tests = ['sync', 'psync', 'posixaio', 'mmap']
tags = ['functional', 'io']
[tests/functional/inuse]
tests = ['inuse_004_pos', 'inuse_005_pos', 'inuse_008_pos', 'inuse_009_pos']
post =
tags = ['functional', 'inuse']
[tests/functional/large_files]
tests = ['large_files_001_pos', 'large_files_002_pos']
tags = ['functional', 'large_files']
[tests/functional/largest_pool]
tests = ['largest_pool_001_pos']
pre =
post =
tags = ['functional', 'largest_pool']
[tests/functional/limits]
tests = ['filesystem_count', 'filesystem_limit', 'snapshot_count',
'snapshot_limit']
tags = ['functional', 'limits']
[tests/functional/link_count]
tests = ['link_count_001', 'link_count_root_inode']
tags = ['functional', 'link_count']
[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
'migration_004_pos', 'migration_005_pos', 'migration_006_pos',
'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
tags = ['functional', 'migration']
[tests/functional/mmap]
tests = ['mmap_write_001_pos', 'mmap_read_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mount]
tests = ['umount_001', 'umountall_001']
tags = ['functional', 'mount']
[tests/functional/mv_files]
tests = ['mv_files_001_pos', 'mv_files_002_pos', 'random_creation']
tags = ['functional', 'mv_files']
[tests/functional/nestedfs]
tests = ['nestedfs_001_pos']
tags = ['functional', 'nestedfs']
[tests/functional/no_space]
tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos',
'enospc_df']
tags = ['functional', 'no_space']
[tests/functional/nopwrite]
tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative',
'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync',
'nopwrite_varying_compression', 'nopwrite_volume']
tags = ['functional', 'nopwrite']
[tests/functional/online_offline]
tests = ['online_offline_001_pos', 'online_offline_002_neg',
'online_offline_003_neg']
tags = ['functional', 'online_offline']
[tests/functional/pool_checkpoint]
tests = ['checkpoint_after_rewind', 'checkpoint_big_rewind',
'checkpoint_capacity', 'checkpoint_conf_change', 'checkpoint_discard',
'checkpoint_discard_busy', 'checkpoint_discard_many',
'checkpoint_indirect', 'checkpoint_invalid', 'checkpoint_lun_expsz',
'checkpoint_open', 'checkpoint_removal', 'checkpoint_rewind',
'checkpoint_ro_rewind', 'checkpoint_sm_scale', 'checkpoint_twice',
'checkpoint_vdev_add', 'checkpoint_zdb', 'checkpoint_zhack_feat']
tags = ['functional', 'pool_checkpoint']
timeout = 1800
[tests/functional/pool_names]
tests = ['pool_names_001_pos', 'pool_names_002_neg']
pre =
post =
tags = ['functional', 'pool_names']
[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
tags = ['functional', 'poolversion']
[tests/functional/pyzfs]
tests = ['pyzfs_unittest']
pre =
post =
tags = ['functional', 'pyzfs']
[tests/functional/quota]
tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
'quota_004_pos', 'quota_005_pos', 'quota_006_neg']
tags = ['functional', 'quota']
[tests/functional/redacted_send]
tests = ['redacted_compressed', 'redacted_contents', 'redacted_deleted',
'redacted_disabled_feature', 'redacted_embedded', 'redacted_holes',
'redacted_incrementals', 'redacted_largeblocks', 'redacted_many_clones',
'redacted_mixed_recsize', 'redacted_mounts', 'redacted_negative',
'redacted_origin', 'redacted_panic', 'redacted_props', 'redacted_resume',
'redacted_size', 'redacted_volume']
tags = ['functional', 'redacted_send']
[tests/functional/raidz]
tests = ['raidz_001_neg', 'raidz_002_pos', 'raidz_003_pos', 'raidz_004_pos']
tags = ['functional', 'raidz']
[tests/functional/redundancy]
tests = ['redundancy_draid', 'redundancy_draid1', 'redundancy_draid2',
'redundancy_draid3', 'redundancy_draid_damaged', 'redundancy_draid_spare1',
'redundancy_draid_spare2', 'redundancy_draid_spare3', 'redundancy_mirror',
'redundancy_raidz', 'redundancy_raidz1', 'redundancy_raidz2',
'redundancy_raidz3', 'redundancy_stripe']
tags = ['functional', 'redundancy']
timeout = 1200
[tests/functional/refquota]
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg',
'refquota_007_neg', 'refquota_008_neg']
tags = ['functional', 'refquota']
[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
'refreserv_004_pos', 'refreserv_005_pos', 'refreserv_multi_raidz',
'refreserv_raidz']
tags = ['functional', 'refreserv']
[tests/functional/removal]
pre =
tests = ['removal_all_vdev', 'removal_cancel', 'removal_check_space',
'removal_condense_export', 'removal_multiple_indirection',
'removal_nopwrite', 'removal_remap_deadlists',
'removal_resume_export', 'removal_sanity', 'removal_with_add',
'removal_with_create_fs', 'removal_with_dedup',
'removal_with_errors', 'removal_with_export',
'removal_with_ganging', 'removal_with_faulted',
'removal_with_remove', 'removal_with_scrub', 'removal_with_send',
'removal_with_send_recv', 'removal_with_snapshot',
'removal_with_write', 'removal_with_zdb', 'remove_expanded',
'remove_mirror', 'remove_mirror_sanity', 'remove_raidz',
'remove_indirect', 'remove_attach_mirror']
tags = ['functional', 'removal']
[tests/functional/rename_dirs]
tests = ['rename_dirs_001_pos']
tags = ['functional', 'rename_dirs']
[tests/functional/replacement]
tests = ['attach_import', 'attach_multiple', 'attach_rebuild',
'attach_resilver', 'detach', 'rebuild_disabled_feature',
'rebuild_multiple', 'rebuild_raidz', 'replace_import', 'replace_rebuild',
'replace_resilver', 'resilver_restart_001', 'resilver_restart_002',
'scrub_cancel']
tags = ['functional', 'replacement']
[tests/functional/reservation]
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos',
'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos',
'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg',
'reservation_022_pos']
tags = ['functional', 'reservation']
[tests/functional/rootpool]
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
tags = ['functional', 'rootpool']
[tests/functional/rsend]
tests = ['recv_dedup', 'recv_dedup_encrypted_zvol', 'rsend_001_pos',
'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos',
'rsend_006_pos', 'rsend_007_pos', 'rsend_008_pos', 'rsend_009_pos',
'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos', 'rsend_013_pos',
'rsend_014_pos', 'rsend_016_neg', 'rsend_019_pos', 'rsend_020_pos',
'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos',
'send-c_verify_ratio', 'send-c_verify_contents', 'send-c_props',
'send-c_incremental', 'send-c_volume', 'send-c_zstreamdump',
'send-c_lz4_disabled', 'send-c_recv_lz4_disabled',
'send-c_mixed_compression', 'send-c_stream_size_estimate',
'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',
'send-c_recv_dedup', 'send-L_toggle', 'send_encrypted_hierarchy',
'send_encrypted_props', 'send_encrypted_truncated_files',
'send_freeobjects', 'send_realloc_files',
'send_realloc_encrypted_files', 'send_spill_block', 'send_holds',
'send_hole_birth', 'send_mixed_raw', 'send-wR_encrypted_zvol',
'send_partial_dataset', 'send_invalid', 'send_doall']
tags = ['functional', 'rsend']
[tests/functional/scrub_mirror]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',
'scrub_mirror_003_pos', 'scrub_mirror_004_pos']
tags = ['functional', 'scrub_mirror']
[tests/functional/slog]
tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos',
'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg',
'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg',
'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 'slog_replay_fs_001',
'slog_replay_fs_002', 'slog_replay_volume']
tags = ['functional', 'slog']
[tests/functional/snapshot]
tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos',
'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos',
'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos',
'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos',
'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos',
'snapshot_017_pos']
tags = ['functional', 'snapshot']
[tests/functional/snapused]
tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
'snapused_004_pos', 'snapused_005_pos']
tags = ['functional', 'snapused']
[tests/functional/sparse]
tests = ['sparse_001_pos']
tags = ['functional', 'sparse']
[tests/functional/suid]
tests = ['suid_write_to_suid', 'suid_write_to_sgid', 'suid_write_to_suid_sgid',
'suid_write_to_none']
tags = ['functional', 'suid']
[tests/functional/threadsappend]
tests = ['threadsappend_001_pos']
tags = ['functional', 'threadsappend']
[tests/functional/trim]
tests = ['autotrim_integrity', 'autotrim_config', 'autotrim_trim_integrity',
'trim_integrity', 'trim_config', 'trim_l2arc']
tags = ['functional', 'trim']
[tests/functional/truncate]
tests = ['truncate_001_pos', 'truncate_002_pos', 'truncate_timestamps']
tags = ['functional', 'truncate']
[tests/functional/upgrade]
tests = ['upgrade_userobj_001_pos', 'upgrade_readonly_pool']
tags = ['functional', 'upgrade']
[tests/functional/userquota]
tests = [
'userquota_001_pos', 'userquota_002_pos', 'userquota_003_pos',
'userquota_004_pos', 'userquota_005_neg', 'userquota_006_pos',
'userquota_007_pos', 'userquota_008_pos', 'userquota_009_pos',
'userquota_010_pos', 'userquota_011_pos', 'userquota_012_neg',
'userspace_001_pos', 'userspace_002_pos', 'userspace_encrypted']
tags = ['functional', 'userquota']
[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos',
'vdev_zaps_007_pos']
tags = ['functional', 'vdev_zaps']
[tests/functional/write_dirs]
tests = ['write_dirs_001_pos', 'write_dirs_002_pos']
tags = ['functional', 'write_dirs']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
tests = ['zvol_ENOSPC_001_pos']
tags = ['functional', 'zvol', 'zvol_ENOSPC']
[tests/functional/zvol/zvol_cli]
tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg']
tags = ['functional', 'zvol', 'zvol_cli']
[tests/functional/zvol/zvol_misc]
tests = ['zvol_misc_002_pos', 'zvol_misc_hierarchy', 'zvol_misc_rename_inuse',
'zvol_misc_snapdev', 'zvol_misc_volmode', 'zvol_misc_zil']
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/zvol/zvol_swap]
tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_004_pos']
tags = ['functional', 'zvol', 'zvol_swap']
[tests/functional/libzfs]
tests = ['many_fds', 'libzfs_input']
tags = ['functional', 'libzfs']
[tests/functional/log_spacemap]
tests = ['log_spacemap_import_logs']
pre =
post =
tags = ['functional', 'log_spacemap']
[tests/functional/l2arc]
tests = ['l2arc_arcstats_pos', 'l2arc_mfuonly_pos', 'l2arc_l2miss_pos',
'persist_l2arc_001_pos', 'persist_l2arc_002_pos',
- 'persist_l2arc_003_neg', 'persist_l2arc_004_pos', 'persist_l2arc_005_pos',
- 'persist_l2arc_006_pos', 'persist_l2arc_007_pos', 'persist_l2arc_008_pos']
+ 'persist_l2arc_003_neg', 'persist_l2arc_004_pos', 'persist_l2arc_005_pos']
tags = ['functional', 'l2arc']
[tests/functional/zpool_influxdb]
tests = ['zpool_influxdb']
tags = ['functional', 'zpool_influxdb']
diff --git a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
index 8c3bce13491b..4661a47f55a9 100755
--- a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
+++ b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
@@ -1,449 +1,447 @@
#!/usr/bin/env @PYTHON_SHEBANG@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
# This script must remain compatible with Python 2.6+ and Python 3.4+.
#
import os
import re
import sys
#
# This script parses the stdout of zfstest, which has this format:
#
# Test: /path/to/testa (run as root) [00:00] [PASS]
# Test: /path/to/testb (run as jkennedy) [00:00] [PASS]
# Test: /path/to/testc (run as root) [00:00] [FAIL]
# [...many more results...]
#
# Results Summary
# FAIL 22
# SKIP 32
# PASS 1156
#
# Running Time: 02:50:31
# Percent passed: 95.5%
# Log directory: /var/tmp/test_results/20180615T205926
#
#
# Common generic reasons for a test or test group to be skipped.
#
# Some test cases are known to fail in ways which are not harmful or dangerous.
# In these cases simply mark the test as a known failure until it can be
# updated and the issue resolved. Note that it's preferable to open a unique
# issue on the GitHub issue tracker for each test case failure.
#
known_reason = 'Known issue'
#
# Some tests require that a test user be able to execute the zfs utilities.
# This may not be possible when testing in-tree due to the default permissions
# on the user's home directory.  When testing, this can be resolved by granting
# group read access.
#
# chmod 0750 $HOME
#
exec_reason = 'Test user execute permissions required for utilities'
#
# Some tests require a minimum python version of 3.5 and will be skipped when
# the default system version is too old. There may also be tests which require
# additional python modules be installed; for example, python-cffi is required
# by the pyzfs tests.
#
python_reason = 'Python v3.5 or newer required'
python_deps_reason = 'Python modules missing: python-cffi'
#
# Some tests require the O_TMPFILE flag which was first introduced in the
# 3.11 kernel.
#
tmpfile_reason = 'Kernel O_TMPFILE support required'
#
# Some tests require that the NFS client and server utilities be installed.
#
share_reason = 'NFS client and server utilities required'
#
# Some tests require that the lsattr utility support the project id feature.
#
project_id_reason = 'lsattr with set/show project ID required'
#
# Some tests require that the kernel support user namespaces.
#
user_ns_reason = 'Kernel user namespace support required'
#
# Some rewind tests can fail since nothing guarantees that old MOS blocks
# are not overwritten. Snapshots protect datasets and data files but not
# the MOS. Reasonable efforts are made in the test case to increase the
# odds that some txgs will have their MOS data left untouched, but it is
# never a sure thing.
#
rewind_reason = 'Arbitrary pool rewind is not guaranteed'
#
# Some tests may be structured in a way that relies on exact knowledge
# of how much free space is available in a pool. These tests cannot be
# made completely reliable because the internal details of how free space
# is managed are not exposed to user space.
#
enospc_reason = 'Exact free space reporting is not guaranteed'
#
# Some tests require a minimum version of the fio benchmark utility.
# Older distributions such as CentOS 6.x only provide fio-2.0.13.
#
fio_reason = 'Fio v2.3 or newer required'
#
# Some tests require that the DISKS provided support the discard operation.
# Normally this is not an issue because loopback devices are used for DISKS
# and they support discard (TRIM/UNMAP).
#
trim_reason = 'DISKS must support discard (TRIM/UNMAP)'
#
# Some tests are not applicable to a platform or need to be updated to operate
# in the manner required by the platform. Any tests which are skipped for this
# reason will be suppressed in the final analysis output.
#
na_reason = "Not applicable"
#
# Some test cases don't have all the requirements to run on GitHub Actions CI.
#
ci_reason = 'CI runner doesn\'t have all requirements'
summary = {
'total': float(0),
'passed': float(0),
'logfile': "Could not determine logfile location."
}
#
# These tests are known to fail, thus we use this list to prevent these
# failures from failing the job as a whole; only unexpected failures
# bubble up to cause this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number.  Alternatively, one of the generic
# reasons listed above can be used.
#
known = {
'casenorm/mixed_none_lookup_ci': ['FAIL', '7633'],
'casenorm/mixed_formd_lookup_ci': ['FAIL', '7633'],
'cli_root/zfs_unshare/zfs_unshare_002_pos': ['SKIP', na_reason],
'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason],
'cli_user/misc/zfs_share_001_neg': ['SKIP', na_reason],
'cli_user/misc/zfs_unshare_001_neg': ['SKIP', na_reason],
'privilege/setup': ['SKIP', na_reason],
'refreserv/refreserv_004_pos': ['FAIL', known_reason],
'rootpool/setup': ['SKIP', na_reason],
'rsend/rsend_008_pos': ['SKIP', '6066'],
'vdev_zaps/vdev_zaps_007_pos': ['FAIL', known_reason],
}
if sys.platform.startswith('freebsd'):
known.update({
'cli_root/zpool_wait/zpool_wait_trim_basic': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_cancel': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_flag': ['SKIP', trim_reason],
'link_count/link_count_001': ['SKIP', na_reason],
})
elif sys.platform.startswith('linux'):
known.update({
'casenorm/mixed_formd_lookup': ['FAIL', '7633'],
'casenorm/mixed_formd_delete': ['FAIL', '7633'],
'casenorm/sensitive_formd_lookup': ['FAIL', '7633'],
'casenorm/sensitive_formd_delete': ['FAIL', '7633'],
'removal/removal_with_zdb': ['SKIP', known_reason],
})
#
# These tests may occasionally fail or be skipped.  We want these failures
# to be reported, but only unexpected failures should bubble up to cause
# this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number.  Alternatively, one of the generic
# reasons listed above can be used.
#
maybe = {
'chattr/setup': ['SKIP', exec_reason],
'cli_root/zdb/zdb_006_pos': ['FAIL', known_reason],
'cli_root/zfs_destroy/zfs_destroy_dev_removal_condense':
['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_004_pos': ['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', '5479'],
'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', known_reason],
'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', known_reason],
'cli_root/zfs_share/setup': ['SKIP', share_reason],
'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason],
'cli_root/zfs_unshare/setup': ['SKIP', share_reason],
'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason],
'cli_root/zpool_destroy/zpool_destroy_001_pos': ['SKIP', '6145'],
'cli_root/zpool_import/import_rewind_device_replaced':
['FAIL', rewind_reason],
'cli_root/zpool_import/import_rewind_config_changed':
['FAIL', rewind_reason],
'cli_root/zpool_import/zpool_import_missing_003_pos': ['SKIP', '6839'],
'cli_root/zpool_initialize/zpool_initialize_import_export':
['FAIL', '11948'],
'cli_root/zpool_labelclear/zpool_labelclear_removed':
['FAIL', known_reason],
'cli_root/zpool_trim/setup': ['SKIP', trim_reason],
'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', '6141'],
'delegate/setup': ['SKIP', exec_reason],
'history/history_004_pos': ['FAIL', '7026'],
'history/history_005_neg': ['FAIL', '6680'],
'history/history_006_neg': ['FAIL', '5657'],
'history/history_008_pos': ['FAIL', known_reason],
'history/history_010_pos': ['SKIP', exec_reason],
'io/mmap': ['SKIP', fio_reason],
- 'l2arc/persist_l2arc_005_pos': ['FAIL', known_reason],
- 'l2arc/persist_l2arc_007_pos': ['FAIL', '11887'],
'largest_pool/largest_pool_001_pos': ['FAIL', known_reason],
'mmp/mmp_on_uberblocks': ['FAIL', known_reason],
'pyzfs/pyzfs_unittest': ['SKIP', python_deps_reason],
'no_space/enospc_002_pos': ['FAIL', enospc_reason],
'pool_checkpoint/checkpoint_discard_busy': ['FAIL', '11946'],
'projectquota/setup': ['SKIP', exec_reason],
'redundancy/redundancy_004_neg': ['FAIL', '7290'],
'redundancy/redundancy_draid_spare3': ['SKIP', known_reason],
'removal/removal_condense_export': ['FAIL', known_reason],
'reservation/reservation_008_pos': ['FAIL', '7741'],
'reservation/reservation_018_pos': ['FAIL', '5642'],
'rsend/rsend_019_pos': ['FAIL', '6086'],
'rsend/rsend_020_pos': ['FAIL', '6446'],
'rsend/rsend_021_pos': ['FAIL', '6446'],
'rsend/rsend_024_pos': ['FAIL', '5665'],
'rsend/send-c_volume': ['FAIL', '6087'],
'rsend/send_partial_dataset': ['FAIL', known_reason],
'snapshot/clone_001_pos': ['FAIL', known_reason],
'snapshot/snapshot_009_pos': ['FAIL', '7961'],
'snapshot/snapshot_010_pos': ['FAIL', '7961'],
'snapused/snapused_004_pos': ['FAIL', '5513'],
'tmpfile/setup': ['SKIP', tmpfile_reason],
'threadsappend/threadsappend_001_pos': ['FAIL', '6136'],
'trim/setup': ['SKIP', trim_reason],
'upgrade/upgrade_projectquota_001_pos': ['SKIP', project_id_reason],
'user_namespace/setup': ['SKIP', user_ns_reason],
'userquota/setup': ['SKIP', exec_reason],
'vdev_zaps/vdev_zaps_004_pos': ['FAIL', '6935'],
'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', '5848'],
'pam/setup': ['SKIP', "pamtester might not be available"],
}
if sys.platform.startswith('freebsd'):
maybe.update({
'cli_root/zfs_copies/zfs_copies_002_pos': ['FAIL', known_reason],
'cli_root/zfs_inherit/zfs_inherit_001_neg': ['FAIL', known_reason],
'cli_root/zfs_receive/receive-o-x_props_override':
['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_011_pos': ['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_concurrent_shares':
['FAIL', known_reason],
'cli_root/zpool_import/zpool_import_012_pos': ['FAIL', known_reason],
'cli_root/zpool_import/zpool_import_features_001_pos':
['FAIL', '11854'],
'cli_root/zpool_import/zpool_import_features_002_neg':
['FAIL', '11854'],
'cli_root/zpool_import/zpool_import_features_003_pos':
['FAIL', '11854'],
'delegate/zfs_allow_003_pos': ['FAIL', known_reason],
'inheritance/inherit_001_pos': ['FAIL', '11829'],
'pool_checkpoint/checkpoint_zhack_feat': ['FAIL', '11854'],
'resilver/resilver_restart_001': ['FAIL', known_reason],
'zvol/zvol_misc/zvol_misc_volmode': ['FAIL', known_reason],
})
elif sys.platform.startswith('linux'):
maybe.update({
'alloc_class/alloc_class_009_pos': ['FAIL', known_reason],
'alloc_class/alloc_class_010_pos': ['FAIL', known_reason],
'alloc_class/alloc_class_011_neg': ['FAIL', known_reason],
'alloc_class/alloc_class_012_pos': ['FAIL', known_reason],
'alloc_class/alloc_class_013_pos': ['FAIL', '11888'],
'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
'cli_root/zpool_expand/zpool_expand_001_pos': ['FAIL', known_reason],
'cli_root/zpool_expand/zpool_expand_005_pos': ['FAIL', known_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason],
'fault/auto_spare_shared': ['FAIL', '11889'],
'io/io_uring': ['SKIP', 'io_uring support required'],
'limits/filesystem_limit': ['SKIP', known_reason],
'limits/snapshot_limit': ['SKIP', known_reason],
'mmp/mmp_active_import': ['FAIL', known_reason],
'mmp/mmp_exported_import': ['FAIL', known_reason],
'mmp/mmp_inactive_import': ['FAIL', known_reason],
'refreserv/refreserv_raidz': ['FAIL', known_reason],
'rsend/rsend_007_pos': ['FAIL', known_reason],
'rsend/rsend_010_pos': ['FAIL', known_reason],
'rsend/rsend_011_pos': ['FAIL', known_reason],
'snapshot/rollback_003_pos': ['FAIL', known_reason],
})
# Not all GitHub Actions runners have the scsi_debug module, so we may skip
# some tests that use it.
if os.environ.get('CI') == 'true':
known.update({
'cli_root/zpool_expand/zpool_expand_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_003_neg': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/setup': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_002_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_004_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_006_neg': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_007_pos': ['SKIP', ci_reason],
'cli_root/zpool_split/zpool_split_wholedisk': ['SKIP', ci_reason],
'fault/auto_offline_001_pos': ['SKIP', ci_reason],
'fault/auto_online_001_pos': ['SKIP', ci_reason],
'fault/auto_online_002_pos': ['SKIP', ci_reason],
'fault/auto_replace_001_pos': ['SKIP', ci_reason],
'fault/auto_spare_ashift': ['SKIP', ci_reason],
'fault/auto_spare_shared': ['SKIP', ci_reason],
'procfs/pool_state': ['SKIP', ci_reason],
})
maybe.update({
'events/events_002_pos': ['FAIL', '11546'],
})
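#
# A minimal lookup sketch (hypothetical result value) of how the tables above
# are consulted by the reporting code below: a non-PASS result is treated as
# expected only when the test appears in 'known' or 'maybe' with a matching
# expected result, e.g.
#
# test, result = 'casenorm/mixed_none_lookup_ci', 'FAIL'
# is_expected = ((test in known and result in known[test][0]) or
# (test in maybe and result in maybe[test][0]))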
def usage(s):
print(s)
sys.exit(1)
def process_results(pathname):
try:
f = open(pathname)
except IOError as e:
print('Error opening file: %s' % e)
sys.exit(1)
prefix = '/zfs-tests/tests/functional/'
pattern = \
r'^Test(?:\s+\(\S+\))?:' + \
r'\s*\S*%s(\S+)\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]' \
% prefix
pattern_log = r'^\s*Log directory:\s*(\S*)'
d = {}
for line in f.readlines():
m = re.match(pattern, line)
if m and len(m.groups()) == 4:
summary['total'] += 1
if m.group(4) == "PASS":
summary['passed'] += 1
d[m.group(1)] = m.group(4)
continue
m = re.match(pattern_log, line)
if m:
summary['logfile'] = m.group(1)
return d
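#
# A hypothetical example of a results line the pattern above is meant to
# match (the path and timing are illustrative):
#
# Test: /usr/share/zfs/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos (run as root) [00:12] [PASS]
#
# group(1) is the test name relative to the functional directory
# ('cli_root/zfs_get/zfs_get_004_pos'), group(2) is the user it ran as,
# group(3) is the bracketed elapsed time, and group(4) is the result that
# gets tallied into the summary ('PASS').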
if __name__ == "__main__":
if len(sys.argv) != 2:
usage('usage: %s <pathname>' % sys.argv[0])
results = process_results(sys.argv[1])
if summary['total'] == 0:
print("\n\nNo test results were found.")
print("Log directory: %s" % summary['logfile'])
sys.exit(0)
expected = []
unexpected = []
for test in list(results.keys()):
if results[test] == "PASS":
continue
setup = test.replace(os.path.basename(test), "setup")
if results[test] == "SKIP" and test != setup:
if setup in known and known[setup][0] == "SKIP":
continue
if setup in maybe and maybe[setup][0] == "SKIP":
continue
if ((test not in known or results[test] not in known[test][0]) and
(test not in maybe or results[test] not in maybe[test][0])):
unexpected.append(test)
else:
expected.append(test)
print("\nTests with results other than PASS that are expected:")
for test in sorted(expected):
issue_url = 'https://github.com/openzfs/zfs/issues/'
# Include the reason why the result is expected, given the following:
# 1. Suppress test results which set the "Not applicable" reason.
# 2. Numerical reasons are assumed to be GitHub issue numbers.
# 3. When an entire test group is skipped only report the setup reason.
if test in known:
if known[test][1] == na_reason:
continue
elif known[test][1].isdigit():
expect = issue_url + known[test][1]
else:
expect = known[test][1]
elif test in maybe:
if maybe[test][1].isdigit():
expect = issue_url + maybe[test][1]
else:
expect = maybe[test][1]
elif setup in known and known[setup][0] == "SKIP" and setup != test:
continue
elif setup in maybe and maybe[setup][0] == "SKIP" and setup != test:
continue
else:
expect = "UNKNOWN REASON"
print(" %s %s (%s)" % (results[test], test, expect))
print("\nTests with result of PASS that are unexpected:")
for test in sorted(known.keys()):
# We probably should not be silently ignoring the case
# where "test" is not in "results".
if test not in results or results[test] != "PASS":
continue
print(" %s %s (expected %s)" % (results[test], test,
known[test][0]))
print("\nTests with results other than PASS that are unexpected:")
for test in sorted(unexpected):
expect = "PASS" if test not in known else known[test][0]
print(" %s %s (expected %s)" % (results[test], test, expect))
if len(unexpected) == 0:
sys.exit(0)
else:
sys.exit(1)
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/Makefile.am
index abae69dea8c7..8d5885e08447 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/Makefile.am
@@ -1,6 +1,8 @@
include $(top_srcdir)/config/Rules.am
pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin
pkgexec_PROGRAMS = mkbusy
mkbusy_SOURCES = mkbusy.c
+
+mkbusy_LDADD = $(abs_top_builddir)/lib/libzfs_core/libzfs_core.la
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/mkbusy.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/mkbusy.c
index a03076ffc003..ab2856b3f7c7 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/mkbusy.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/mkbusy.c
@@ -1,177 +1,153 @@
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
*/
/*
* Make a directory busy. If the argument is an existing file or directory,
* simply open it directly and pause. If not, verify that the parent directory
* exists, and create a new file in that directory.
*/
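/*
* Example (hypothetical path): "mkbusy /testpool/fs/dir" opens the directory
* (or, if the path does not yet exist, creates and opens a file by that name
* in its parent directory), prints the pid of the daemonized child, and keeps
* the file or directory open until that process is killed.
*/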
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <dirent.h>
#include <strings.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
+#include <libzutil.h>
-static void
+
+static __attribute__((noreturn)) void
usage(char *progname)
{
(void) fprintf(stderr, "Usage: %s <dirname|filename>\n", progname);
exit(1);
}
-static void
-fail(char *err, int rval)
+static __attribute__((noreturn)) void
+fail(char *err)
{
perror(err);
- exit(rval);
+ exit(1);
}
static void
daemonize(void)
{
pid_t pid;
if ((pid = fork()) < 0) {
- fail("fork", 1);
+ fail("fork");
} else if (pid != 0) {
(void) fprintf(stdout, "%ld\n", (long)pid);
exit(0);
}
(void) setsid();
(void) close(0);
(void) close(1);
(void) close(2);
}
int
main(int argc, char *argv[])
{
- int ret, c;
+ int c;
boolean_t isdir = B_FALSE;
- boolean_t fflag = B_FALSE;
- boolean_t rflag = B_FALSE;
struct stat sbuf;
char *fpath = NULL;
char *prog = argv[0];
- while ((c = getopt(argc, argv, "fr")) != -1) {
+ while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
- /* Open the file or directory read only */
- case 'r':
- rflag = B_TRUE;
- break;
- /* Run in the foreground */
- case 'f':
- fflag = B_TRUE;
- break;
default:
usage(prog);
}
}
argc -= optind;
argv += optind;
if (argc != 1)
usage(prog);
- if ((ret = stat(argv[0], &sbuf)) != 0) {
- char *arg, *dname, *fname;
- int arglen;
- char *slash;
- int rc;
+ if (stat(argv[0], &sbuf) != 0) {
+ char *arg;
+ const char *dname, *fname;
+ size_t arglen;
+ ssize_t dnamelen;
/*
* The argument supplied doesn't exist. Copy the path, and
* remove the trailing slash if present.
*/
if ((arg = strdup(argv[0])) == NULL)
- fail("strdup", 1);
+ fail("strdup");
arglen = strlen(arg);
if (arg[arglen - 1] == '/')
arg[arglen - 1] = '\0';
- /*
- * Get the directory and file names, using the current directory
- * if the provided path doesn't specify a directory at all.
- */
- if ((slash = strrchr(arg, '/')) == NULL) {
- dname = strdup(".");
- fname = strdup(arg);
- } else {
- *slash = '\0';
- dname = strdup(arg);
- fname = strdup(slash + 1);
- }
- free(arg);
- if (dname == NULL || fname == NULL)
- fail("strdup", 1);
+ /* Get the directory and file names. */
+ fname = zfs_basename(arg);
+ dname = arg;
+ if ((dnamelen = zfs_dirnamelen(arg)) != -1)
+ arg[dnamelen] = '\0';
+ else
+ dname = ".";
/* The directory portion of the path must exist */
- if ((ret = stat(dname, &sbuf)) != 0 || !(sbuf.st_mode &
- S_IFDIR))
+ if (stat(dname, &sbuf) != 0 || !(sbuf.st_mode & S_IFDIR))
usage(prog);
- rc = asprintf(&fpath, "%s/%s", dname, fname);
- free(dname);
- free(fname);
- if (rc == -1 || fpath == NULL)
- fail("asprintf", 1);
-
- } else if ((sbuf.st_mode & S_IFMT) == S_IFREG ||
- (sbuf.st_mode & S_IFMT) == S_IFLNK ||
- (sbuf.st_mode & S_IFMT) == S_IFCHR ||
- (sbuf.st_mode & S_IFMT) == S_IFBLK) {
- fpath = strdup(argv[0]);
- } else if ((sbuf.st_mode & S_IFMT) == S_IFDIR) {
- fpath = strdup(argv[0]);
- isdir = B_TRUE;
- } else {
- usage(prog);
- }
+ if (asprintf(&fpath, "%s/%s", dname, fname) == -1)
+ fail("asprintf");
- if (fpath == NULL)
- fail("strdup", 1);
-
- if (isdir == B_FALSE) {
- int fd, flags;
- mode_t mode = S_IRUSR | S_IWUSR;
+ free(arg);
+ } else
+ switch (sbuf.st_mode & S_IFMT) {
+ case S_IFDIR:
+ isdir = B_TRUE;
+ /* FALLTHROUGH */
+ case S_IFLNK:
+ case S_IFCHR:
+ case S_IFBLK:
+ if ((fpath = strdup(argv[0])) == NULL)
+ fail("strdup");
+ break;
+ default:
+ usage(prog);
+ }
- flags = rflag == B_FALSE ? O_CREAT | O_RDWR : O_RDONLY;
+ if (!isdir) {
+ int fd;
- if ((fd = open(fpath, flags, mode)) < 0)
- fail("open", 1);
+ if ((fd = open(fpath, O_CREAT | O_RDWR, 0600)) < 0)
+ fail("open");
} else {
DIR *dp;
if ((dp = opendir(fpath)) == NULL)
- fail("opendir", 1);
+ fail("opendir");
}
free(fpath);
- if (fflag == B_FALSE)
- daemonize();
+ daemonize();
(void) pause();
- /* NOTREACHED */
return (0);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/mkfile/mkfile.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/mkfile/mkfile.c
index 4cf3755faa6a..673cbf9e0069 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/mkfile/mkfile.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/mkfile/mkfile.c
@@ -1,282 +1,281 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <stdio.h>
#include <ctype.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <libintl.h>
#include <errno.h>
#include <sys/stdtypes.h>
#include <sys/sysmacros.h>
#define BLOCKSIZE 512 /* bytes */
#define KILOBYTE 1024
#define MEGABYTE (KILOBYTE * KILOBYTE)
#define GIGABYTE (KILOBYTE * MEGABYTE)
#define FILE_MODE (S_ISVTX + S_IRUSR + S_IWUSR)
-static void usage(void);
+static void usage(void) __attribute__((noreturn));
int
main(int argc, char **argv)
{
char *opts;
off_t size;
size_t len;
size_t mult = 1;
char *buf = NULL;
size_t bufsz = 0;
int errors = 0;
int i;
int verbose = 0; /* option variable */
int nobytes = 0; /* option variable */
int saverr;
if (argc == 1)
usage();
while (argv[1] && argv[1][0] == '-') {
opts = &argv[1][0];
while (*(++opts)) {
switch (*opts) {
case 'v':
verbose++;
break;
case 'n':
nobytes++;
break;
default:
usage();
}
}
argc--;
argv++;
}
if (argc < 3)
usage();
len = strlen(argv[1]);
if (len && isalpha(argv[1][len-1])) {
switch (argv[1][len-1]) {
case 'k':
case 'K':
mult = KILOBYTE;
break;
case 'b':
case 'B':
mult = BLOCKSIZE;
break;
case 'm':
case 'M':
mult = MEGABYTE;
break;
case 'g':
case 'G':
mult = GIGABYTE;
break;
default:
(void) fprintf(stderr,
gettext("unknown size %s\n"), argv[1]);
usage();
}
for (i = 0; i <= (len-2); i++) {
if (!isdigit(argv[1][i])) {
(void) fprintf(stderr,
gettext("unknown size %s\n"), argv[1]);
usage();
}
}
argv[1][len-1] = '\0';
}
size = ((off_t)atoll(argv[1]) * (off_t)mult);
argv++;
argc--;
while (argc > 1) {
int fd;
if (verbose)
(void) fprintf(stdout, gettext("%s %lld bytes\n"),
argv[1], (offset_t)size);
fd = open(argv[1], O_CREAT|O_TRUNC|O_RDWR, FILE_MODE);
if (fd < 0) {
saverr = errno;
(void) fprintf(stderr,
gettext("Could not open %s: %s\n"),
argv[1], strerror(saverr));
errors++;
argv++;
argc--;
continue;
} else if (fchown(fd, getuid(), getgid()) < 0) {
saverr = errno;
(void) fprintf(stderr, gettext(
"Could not set owner/group of %s: %s\n"),
argv[1], strerror(saverr));
(void) close(fd);
errors++;
argv++;
argc--;
continue;
} else if (lseek(fd, (off_t)size-1, SEEK_SET) < 0) {
saverr = errno;
(void) fprintf(stderr, gettext(
"Could not seek to offset %ld in %s: %s\n"),
(unsigned long)size-1, argv[1], strerror(saverr));
(void) close(fd);
errors++;
argv++;
argc--;
continue;
} else if (write(fd, "", 1) != 1) {
saverr = errno;
(void) fprintf(stderr, gettext(
"Could not set length of %s: %s\n"),
argv[1], strerror(saverr));
(void) close(fd);
errors++;
argv++;
argc--;
continue;
}
if (!nobytes) {
off_t written = 0;
struct stat64 st;
if (lseek(fd, (off_t)0, SEEK_SET) < 0) {
saverr = errno;
(void) fprintf(stderr, gettext(
"Could not seek to beginning of %s: %s\n"),
argv[1], strerror(saverr));
(void) close(fd);
errors++;
argv++;
argc--;
continue;
}
if (fstat64(fd, &st) < 0) {
saverr = errno;
(void) fprintf(stderr, gettext(
"Could not fstat64 %s: %s\n"),
argv[1], strerror(saverr));
(void) close(fd);
errors++;
argv++;
argc--;
continue;
}
if (bufsz != st.st_blksize) {
if (buf)
free(buf);
bufsz = (size_t)st.st_blksize;
buf = calloc(1, bufsz);
if (buf == NULL) {
(void) fprintf(stderr, gettext(
"Could not allocate buffer of"
" size %d\n"), (int)bufsz);
(void) close(fd);
bufsz = 0;
errors++;
argv++;
argc--;
continue;
}
}
while (written < size) {
ssize_t result;
size_t bytes = (size_t)MIN(bufsz, size-written);
if ((result = write(fd, buf, bytes)) !=
(ssize_t)bytes) {
saverr = errno;
if (result < 0)
result = 0;
written += result;
(void) fprintf(stderr, gettext(
"%s: initialized %lu of %lu bytes: %s\n"),
argv[1], (unsigned long)written,
(unsigned long)size,
strerror(saverr));
errors++;
break;
}
written += bytes;
}
/*
* A write(2) call in the above loop failed so
* close out this file and go on (error was
* already incremented when the write(2) failed).
*/
if (written < size) {
(void) close(fd);
argv++;
argc--;
continue;
}
}
if (close(fd) < 0) {
saverr = errno;
(void) fprintf(stderr, gettext(
"Error encountered when closing %s: %s\n"),
argv[1], strerror(saverr));
errors++;
argv++;
argc--;
continue;
}
/*
* Only set the modes (including the sticky bit) if we
* had no problems. It is not an error for the chmod(2)
* to fail, but do issue a warning.
*/
if (chmod(argv[1], FILE_MODE) < 0)
(void) fprintf(stderr, gettext(
"warning: couldn't set mode to %#o\n"), FILE_MODE);
argv++;
argc--;
}
return (errors);
}
static void usage()
{
(void) fprintf(stderr, gettext(
"Usage: mkfile [-nv] <size>[g|k|b|m] <name1> [<name2>] ...\n"));
exit(1);
- /* NOTREACHED */
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c
index 152f5ba90ed0..1f344534d53e 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c
@@ -1,162 +1,157 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <pthread.h>
#include <errno.h>
#include <err.h>
/*
* --------------------------------------------------------------------
* Bug Issue Id: #7512
* The bug's sequence of events:
* 1. In context #1, zfs_write assigns a txg "n".
* 2. In the same process, context #2 takes an mmap page fault (which means
* the mm_sem is held); zfs_dirty_inode fails to open a txg and waits for
* the previous txg "n" to complete.
* 3. Context #1 calls zfs_uiomove to write, but a page fault occurs inside
* zfs_uiomove, which needs the mm_sem. Since the mm_sem is held by
* context #2, the write gets stuck and cannot complete, so txg "n" never
* completes either.
*
* Thus context #1 and context #2 deadlock with each other.
* --------------------------------------------------------------------
*/
#define NORMAL_WRITE_TH_NUM 2
static void *
normal_writer(void *filename)
{
char *file_path = filename;
int fd = -1;
ssize_t write_num = 0;
int page_size = getpagesize();
fd = open(file_path, O_RDWR | O_CREAT, 0777);
if (fd == -1) {
err(1, "failed to open %s", file_path);
}
- char *buf = malloc(1);
+ char buf;
while (1) {
- write_num = write(fd, buf, 1);
+ write_num = write(fd, &buf, 1);
if (write_num == 0) {
err(1, "write failed!");
break;
}
lseek(fd, page_size, SEEK_CUR);
}
-
- if (buf) {
- free(buf);
- }
}
static void *
map_writer(void *filename)
{
int fd = -1;
int ret = 0;
char *buf = NULL;
int page_size = getpagesize();
int op_errno = 0;
char *file_path = filename;
while (1) {
ret = access(file_path, F_OK);
if (ret) {
op_errno = errno;
if (op_errno == ENOENT) {
fd = open(file_path, O_RDWR | O_CREAT, 0777);
if (fd == -1) {
err(1, "open file failed");
}
ret = ftruncate(fd, page_size);
if (ret == -1) {
err(1, "truncate file failed");
}
} else {
err(1, "access file failed!");
}
} else {
fd = open(file_path, O_RDWR, 0777);
if (fd == -1) {
err(1, "open file failed");
}
}
if ((buf = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0)) == MAP_FAILED) {
err(1, "map file failed");
}
if (fd != -1)
close(fd);
char s[10] = {0, };
memcpy(buf, s, 10);
ret = munmap(buf, page_size);
if (ret != 0) {
err(1, "unmap file failed");
}
}
}
int
main(int argc, char **argv)
{
pthread_t map_write_tid;
pthread_t normal_write_tid[NORMAL_WRITE_TH_NUM];
int i = 0;
if (argc != 3) {
- (void) printf("usage: %s <normal write file name>"
+ (void) printf("usage: %s <normal write file name> "
"<map write file name>\n", argv[0]);
exit(1);
}
for (i = 0; i < NORMAL_WRITE_TH_NUM; i++) {
if (pthread_create(&normal_write_tid[i], NULL, normal_writer,
argv[1])) {
err(1, "pthread_create normal_writer failed.");
}
}
if (pthread_create(&map_write_tid, NULL, map_writer, argv[2])) {
err(1, "pthread_create map_writer failed.");
}
- /* NOTREACHED */
pthread_join(map_write_tid, NULL);
return (0);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg b/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
index a1b75a48292f..56d430a39875 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
@@ -1,95 +1,95 @@
# This file exports variables for each tunable used in the test suite.
#
# Different platforms use different names for most tunables. To avoid littering
# the tests with conditional logic for deciding how to set each tunable, the
# logic is instead consolidated into this one file.
#
# Any use of tunables in tests must use a name defined here. New entries
# should be added to the table as needed. Please keep the table sorted
# alphabetically for ease of maintenance.
#
# Platform-specific tunables should still use a NAME from this table for
# consistency. Enter UNSUPPORTED in the column for platforms on which the
# tunable is not implemented.
UNAME=$(uname)
# NAME FreeBSD tunable Linux tunable
cat <<%%%% |
ADMIN_SNAPSHOT UNSUPPORTED zfs_admin_snapshot
ALLOW_REDACTED_DATASET_MOUNT allow_redacted_dataset_mount zfs_allow_redacted_dataset_mount
ARC_MAX arc.max zfs_arc_max
ARC_MIN arc.min zfs_arc_min
ASYNC_BLOCK_MAX_BLOCKS async_block_max_blocks zfs_async_block_max_blocks
CHECKSUM_EVENTS_PER_SECOND checksum_events_per_second zfs_checksum_events_per_second
COMMIT_TIMEOUT_PCT commit_timeout_pct zfs_commit_timeout_pct
COMPRESSED_ARC_ENABLED compressed_arc_enabled zfs_compressed_arc_enabled
CONDENSE_INDIRECT_COMMIT_ENTRY_DELAY_MS condense.indirect_commit_entry_delay_ms zfs_condense_indirect_commit_entry_delay_ms
CONDENSE_INDIRECT_OBSOLETE_PCT condense.indirect_obsolete_pct zfs_condense_indirect_obsolete_pct
CONDENSE_MIN_MAPPING_BYTES condense.min_mapping_bytes zfs_condense_min_mapping_bytes
-DBUF_CACHE_MAX_BYTES dbuf_cache.max_bytes dbuf_cache_max_bytes
+DBUF_CACHE_SHIFT dbuf.cache_shift dbuf_cache_shift
DEADMAN_CHECKTIME_MS deadman.checktime_ms zfs_deadman_checktime_ms
DEADMAN_FAILMODE deadman.failmode zfs_deadman_failmode
DEADMAN_SYNCTIME_MS deadman.synctime_ms zfs_deadman_synctime_ms
DEADMAN_ZIOTIME_MS deadman.ziotime_ms zfs_deadman_ziotime_ms
DISABLE_IVSET_GUID_CHECK disable_ivset_guid_check zfs_disable_ivset_guid_check
INITIALIZE_CHUNK_SIZE initialize_chunk_size zfs_initialize_chunk_size
INITIALIZE_VALUE initialize_value zfs_initialize_value
KEEP_LOG_SPACEMAPS_AT_EXPORT keep_log_spacemaps_at_export zfs_keep_log_spacemaps_at_export
LUA_MAX_MEMLIMIT lua.max_memlimit zfs_lua_max_memlimit
L2ARC_MFUONLY l2arc.mfuonly l2arc_mfuonly
L2ARC_NOPREFETCH l2arc.noprefetch l2arc_noprefetch
L2ARC_REBUILD_BLOCKS_MIN_L2SIZE l2arc.rebuild_blocks_min_l2size l2arc_rebuild_blocks_min_l2size
L2ARC_REBUILD_ENABLED l2arc.rebuild_enabled l2arc_rebuild_enabled
L2ARC_TRIM_AHEAD l2arc.trim_ahead l2arc_trim_ahead
L2ARC_WRITE_BOOST l2arc.write_boost l2arc_write_boost
L2ARC_WRITE_MAX l2arc.write_max l2arc_write_max
LIVELIST_CONDENSE_NEW_ALLOC livelist.condense.new_alloc zfs_livelist_condense_new_alloc
LIVELIST_CONDENSE_SYNC_CANCEL livelist.condense.sync_cancel zfs_livelist_condense_sync_cancel
LIVELIST_CONDENSE_SYNC_PAUSE livelist.condense.sync_pause zfs_livelist_condense_sync_pause
LIVELIST_CONDENSE_ZTHR_CANCEL livelist.condense.zthr_cancel zfs_livelist_condense_zthr_cancel
LIVELIST_CONDENSE_ZTHR_PAUSE livelist.condense.zthr_pause zfs_livelist_condense_zthr_pause
LIVELIST_MAX_ENTRIES livelist.max_entries zfs_livelist_max_entries
LIVELIST_MIN_PERCENT_SHARED livelist.min_percent_shared zfs_livelist_min_percent_shared
MAX_DATASET_NESTING max_dataset_nesting zfs_max_dataset_nesting
MAX_MISSING_TVDS max_missing_tvds zfs_max_missing_tvds
METASLAB_DEBUG_LOAD metaslab.debug_load metaslab_debug_load
METASLAB_FORCE_GANGING metaslab.force_ganging metaslab_force_ganging
MULTIHOST_FAIL_INTERVALS multihost.fail_intervals zfs_multihost_fail_intervals
MULTIHOST_HISTORY multihost.history zfs_multihost_history
MULTIHOST_IMPORT_INTERVALS multihost.import_intervals zfs_multihost_import_intervals
MULTIHOST_INTERVAL multihost.interval zfs_multihost_interval
OVERRIDE_ESTIMATE_RECORDSIZE send.override_estimate_recordsize zfs_override_estimate_recordsize
PREFETCH_DISABLE prefetch.disable zfs_prefetch_disable
REBUILD_SCRUB_ENABLED rebuild_scrub_enabled zfs_rebuild_scrub_enabled
REMOVAL_SUSPEND_PROGRESS removal_suspend_progress zfs_removal_suspend_progress
REMOVE_MAX_SEGMENT remove_max_segment zfs_remove_max_segment
RESILVER_MIN_TIME_MS resilver_min_time_ms zfs_resilver_min_time_ms
SCAN_LEGACY scan_legacy zfs_scan_legacy
SCAN_SUSPEND_PROGRESS scan_suspend_progress zfs_scan_suspend_progress
SCAN_VDEV_LIMIT scan_vdev_limit zfs_scan_vdev_limit
SEND_HOLES_WITHOUT_BIRTH_TIME send_holes_without_birth_time send_holes_without_birth_time
SLOW_IO_EVENTS_PER_SECOND slow_io_events_per_second zfs_slow_io_events_per_second
SPA_ASIZE_INFLATION spa.asize_inflation spa_asize_inflation
SPA_DISCARD_MEMORY_LIMIT spa.discard_memory_limit zfs_spa_discard_memory_limit
SPA_LOAD_VERIFY_DATA spa.load_verify_data spa_load_verify_data
SPA_LOAD_VERIFY_METADATA spa.load_verify_metadata spa_load_verify_metadata
TRIM_EXTENT_BYTES_MIN trim.extent_bytes_min zfs_trim_extent_bytes_min
TRIM_METASLAB_SKIP trim.metaslab_skip zfs_trim_metaslab_skip
TRIM_TXG_BATCH trim.txg_batch zfs_trim_txg_batch
TXG_HISTORY txg.history zfs_txg_history
TXG_TIMEOUT txg.timeout zfs_txg_timeout
UNLINK_SUSPEND_PROGRESS UNSUPPORTED zfs_unlink_suspend_progress
VDEV_FILE_PHYSICAL_ASHIFT vdev.file.physical_ashift vdev_file_physical_ashift
VDEV_MIN_MS_COUNT vdev.min_ms_count zfs_vdev_min_ms_count
VDEV_VALIDATE_SKIP vdev.validate_skip vdev_validate_skip
VOL_INHIBIT_DEV UNSUPPORTED zvol_inhibit_dev
VOL_MODE vol.mode zvol_volmode
VOL_RECURSIVE vol.recursive UNSUPPORTED
ZEVENT_LEN_MAX zevent.len_max zfs_zevent_len_max
ZEVENT_RETAIN_MAX zevent.retain_max zfs_zevent_retain_max
ZIO_SLOW_IO_MS zio.slow_io_ms zio_slow_io_ms
%%%%
while read name FreeBSD Linux; do
eval "export ${name}=\$${UNAME}"
done
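#
# A minimal usage sketch: once the variables above are exported, a test
# refers to the platform-neutral NAME and the set_tunable helpers resolve it
# to the right module parameter for the running platform, e.g.
#
# log_must set_tunable32 L2ARC_NOPREFETCH 0
#
# which, per the table above, maps to l2arc.noprefetch on FreeBSD and
# l2arc_noprefetch on Linux.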
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/edonr_test.c b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/edonr_test.c
index a88756091e3d..d8585ea4cf7e 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/edonr_test.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/edonr_test.c
@@ -1,218 +1,215 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
/*
* This is just to keep the compiler happy about sys/time.h not declaring
* gettimeofday due to -D_KERNEL (we can do this since we're actually
* running in userspace, but we need -D_KERNEL for the remaining Edon-R code).
*/
#ifdef _KERNEL
#undef _KERNEL
#endif
#include <sys/edonr.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
-#include <sys/note.h>
#include <sys/time.h>
#include <sys/stdtypes.h>
/*
* Test messages from:
* http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA_All.pdf
*/
const char *test_msg0 = "abc";
const char *test_msg1 = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmn"
"lmnomnopnopq";
const char *test_msg2 = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
"jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu";
/*
* Test digests computed by hand. There's no formal standard or spec for edonr.
*/
const uint8_t edonr_224_test_digests[][28] = {
{
/* for test_msg0 */
0x56, 0x63, 0xc4, 0x93, 0x95, 0x20, 0xfa, 0xf6,
0x12, 0x31, 0x65, 0xa4, 0x66, 0xf2, 0x56, 0x01,
0x95, 0x2e, 0xa9, 0xe4, 0x24, 0xdd, 0xc9, 0x6b,
0xef, 0xd0, 0x40, 0x94
},
{
/* for test_msg1 */
0xd0, 0x13, 0xe4, 0x87, 0x4d, 0x06, 0x8d, 0xca,
0x4e, 0x14, 0xb9, 0x37, 0x2f, 0xce, 0x12, 0x20,
0x60, 0xf8, 0x5c, 0x0a, 0xfd, 0x7a, 0x7d, 0x97,
0x88, 0x2b, 0x05, 0x75
}
/* no test vector for test_msg2 */
};
const uint8_t edonr_256_test_digests[][32] = {
{
/* for test_msg0 */
0x54, 0xd7, 0x8b, 0x13, 0xc7, 0x4e, 0xda, 0x5a,
0xed, 0xc2, 0x71, 0xcc, 0x88, 0x1f, 0xb2, 0x2f,
0x83, 0x99, 0xaf, 0xd3, 0x04, 0x0b, 0x6a, 0x39,
0x2d, 0x73, 0x94, 0x05, 0x50, 0x8d, 0xd8, 0x51
},
{
/* for test_msg1 */
0x49, 0x2d, 0x0b, 0x19, 0xab, 0x1e, 0xde, 0x3a,
0xea, 0x9b, 0xf2, 0x39, 0x3a, 0xb1, 0x21, 0xde,
0x21, 0xf6, 0x80, 0x1f, 0xad, 0xbe, 0x8b, 0x07,
0xc7, 0xfb, 0xe6, 0x99, 0x0e, 0x4d, 0x73, 0x63
}
/* no test vector for test_msg2 */
};
const uint8_t edonr_384_test_digests[][48] = {
{
/* for test_msg0 */
0x0e, 0x7c, 0xd7, 0x85, 0x78, 0x77, 0xe0, 0x89,
0x5b, 0x1c, 0xdf, 0x49, 0xf4, 0x1d, 0x20, 0x9c,
0x72, 0x7d, 0x2e, 0x57, 0x9b, 0x9b, 0x9a, 0xdc,
0x60, 0x27, 0x97, 0x82, 0xb9, 0x90, 0x72, 0xec,
0x7e, 0xce, 0xd3, 0x16, 0x5f, 0x47, 0x75, 0x48,
0xfa, 0x60, 0x72, 0x7e, 0x01, 0xc7, 0x7c, 0xc6
},
{
/* no test vector for test_msg1 */
0
},
{
/* for test_msg2 */
0xe2, 0x34, 0xa1, 0x02, 0x83, 0x76, 0xae, 0xe6,
0x82, 0xd9, 0x38, 0x32, 0x0e, 0x00, 0x78, 0xd2,
0x34, 0xdb, 0xb9, 0xbd, 0xf0, 0x08, 0xa8, 0x0f,
0x63, 0x1c, 0x3d, 0x4a, 0xfd, 0x0a, 0xe9, 0x59,
0xdc, 0xd4, 0xce, 0xcd, 0x8d, 0x67, 0x6c, 0xea,
0xbb, 0x1a, 0x32, 0xed, 0x5c, 0x6b, 0xf1, 0x7f
}
};
const uint8_t edonr_512_test_digests[][64] = {
{
/* for test_msg0 */
0x1b, 0x14, 0xdb, 0x15, 0x5f, 0x1d, 0x40, 0x65,
0x94, 0xb8, 0xce, 0xf7, 0x0a, 0x43, 0x62, 0xec,
0x6b, 0x5d, 0xe6, 0xa5, 0xda, 0xf5, 0x0e, 0xc9,
0x99, 0xe9, 0x87, 0xc1, 0x9d, 0x30, 0x49, 0xe2,
0xde, 0x59, 0x77, 0xbb, 0x05, 0xb1, 0xbb, 0x22,
0x00, 0x50, 0xa1, 0xea, 0x5b, 0x46, 0xa9, 0xf1,
0x74, 0x0a, 0xca, 0xfb, 0xf6, 0xb4, 0x50, 0x32,
0xad, 0xc9, 0x0c, 0x62, 0x83, 0x72, 0xc2, 0x2b
},
{
/* no test vector for test_msg1 */
0
},
{
/* for test_msg2 */
0x53, 0x51, 0x07, 0x0d, 0xc5, 0x1c, 0x3b, 0x2b,
0xac, 0xa5, 0xa6, 0x0d, 0x02, 0x52, 0xcc, 0xb4,
0xe4, 0x92, 0x1a, 0x96, 0xfe, 0x5a, 0x69, 0xe7,
0x6d, 0xad, 0x48, 0xfd, 0x21, 0xa0, 0x84, 0x5a,
0xd5, 0x7f, 0x88, 0x0b, 0x3e, 0x4a, 0x90, 0x7b,
0xc5, 0x03, 0x15, 0x18, 0x42, 0xbb, 0x94, 0x9e,
0x1c, 0xba, 0x74, 0x39, 0xa6, 0x40, 0x9a, 0x34,
0xb8, 0x43, 0x6c, 0xb4, 0x69, 0x21, 0x58, 0x3c
}
};
int
main(int argc, char *argv[])
{
boolean_t failed = B_FALSE;
uint64_t cpu_mhz = 0;
if (argc == 2)
cpu_mhz = atoi(argv[1]);
#define EDONR_ALGO_TEST(_m, mode, testdigest) \
do { \
EdonRState ctx; \
uint8_t digest[mode / 8]; \
EdonRInit(&ctx, mode); \
EdonRUpdate(&ctx, (const uint8_t *) _m, strlen(_m) * 8);\
EdonRFinal(&ctx, digest); \
(void) printf("Edon-R-%-6sMessage: " #_m \
"\tResult: ", #mode); \
if (bcmp(digest, testdigest, mode / 8) == 0) { \
(void) printf("OK\n"); \
} else { \
(void) printf("FAILED!\n"); \
failed = B_TRUE; \
} \
- NOTE(CONSTCOND) \
} while (0)
#define EDONR_PERF_TEST(mode) \
do { \
EdonRState ctx; \
uint8_t digest[mode / 8]; \
uint8_t block[131072]; \
uint64_t delta; \
double cpb = 0; \
int i; \
struct timeval start, end; \
bzero(block, sizeof (block)); \
(void) gettimeofday(&start, NULL); \
EdonRInit(&ctx, mode); \
for (i = 0; i < 8192; i++) \
EdonRUpdate(&ctx, block, sizeof (block) * 8); \
EdonRFinal(&ctx, digest); \
(void) gettimeofday(&end, NULL); \
delta = (end.tv_sec * 1000000llu + end.tv_usec) - \
(start.tv_sec * 1000000llu + start.tv_usec); \
if (cpu_mhz != 0) { \
cpb = (cpu_mhz * 1e6 * ((double)delta / \
1000000)) / (8192 * 128 * 1024); \
} \
(void) printf("Edon-R-%-6s%llu us (%.02f CPB)\n", #mode,\
(u_longlong_t)delta, cpb); \
- NOTE(CONSTCOND) \
} while (0)
(void) printf("Running algorithm correctness tests:\n");
EDONR_ALGO_TEST(test_msg0, 224, edonr_224_test_digests[0]);
EDONR_ALGO_TEST(test_msg1, 224, edonr_224_test_digests[1]);
EDONR_ALGO_TEST(test_msg0, 256, edonr_256_test_digests[0]);
EDONR_ALGO_TEST(test_msg1, 256, edonr_256_test_digests[1]);
EDONR_ALGO_TEST(test_msg0, 384, edonr_384_test_digests[0]);
EDONR_ALGO_TEST(test_msg2, 384, edonr_384_test_digests[2]);
EDONR_ALGO_TEST(test_msg0, 512, edonr_512_test_digests[0]);
EDONR_ALGO_TEST(test_msg2, 512, edonr_512_test_digests[2]);
if (failed)
return (1);
(void) printf("Running performance tests (hashing 1024 MiB of "
"data):\n");
EDONR_PERF_TEST(256);
EDONR_PERF_TEST(512);
return (0);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/sha2_test.c b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/sha2_test.c
index 5800002a6ef7..c7561b54f29e 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/sha2_test.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/sha2_test.c
@@ -1,250 +1,247 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
/*
* This is just to keep the compiler happy about sys/time.h not declaring
* gettimeofday due to -D_KERNEL (we can do this since we're actually
* running in userspace, but we need -D_KERNEL for the remaining SHA2 code).
*/
#ifdef _KERNEL
#undef _KERNEL
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/time.h>
#define _SHA2_IMPL
#include <sys/sha2.h>
#include <sys/stdtypes.h>
-#define NOTE(x)
/*
* Test messages from:
* http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA_All.pdf
*/
const char *test_msg0 = "abc";
const char *test_msg1 = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmn"
"lmnomnopnopq";
const char *test_msg2 = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
"jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu";
/*
* Test digests from:
* http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA_All.pdf
*/
const uint8_t sha256_test_digests[][32] = {
{
/* for test_msg0 */
0xBA, 0x78, 0x16, 0xBF, 0x8F, 0x01, 0xCF, 0xEA,
0x41, 0x41, 0x40, 0xDE, 0x5D, 0xAE, 0x22, 0x23,
0xB0, 0x03, 0x61, 0xA3, 0x96, 0x17, 0x7A, 0x9C,
0xB4, 0x10, 0xFF, 0x61, 0xF2, 0x00, 0x15, 0xAD
},
{
/* for test_msg1 */
0x24, 0x8D, 0x6A, 0x61, 0xD2, 0x06, 0x38, 0xB8,
0xE5, 0xC0, 0x26, 0x93, 0x0C, 0x3E, 0x60, 0x39,
0xA3, 0x3C, 0xE4, 0x59, 0x64, 0xFF, 0x21, 0x67,
0xF6, 0xEC, 0xED, 0xD4, 0x19, 0xDB, 0x06, 0xC1
}
/* no test vector for test_msg2 */
};
const uint8_t sha384_test_digests[][48] = {
{
/* for test_msg0 */
0xCB, 0x00, 0x75, 0x3F, 0x45, 0xA3, 0x5E, 0x8B,
0xB5, 0xA0, 0x3D, 0x69, 0x9A, 0xC6, 0x50, 0x07,
0x27, 0x2C, 0x32, 0xAB, 0x0E, 0xDE, 0xD1, 0x63,
0x1A, 0x8B, 0x60, 0x5A, 0x43, 0xFF, 0x5B, 0xED,
0x80, 0x86, 0x07, 0x2B, 0xA1, 0xE7, 0xCC, 0x23,
0x58, 0xBA, 0xEC, 0xA1, 0x34, 0xC8, 0x25, 0xA7
},
{
/* no test vector for test_msg1 */
0
},
{
/* for test_msg2 */
0x09, 0x33, 0x0C, 0x33, 0xF7, 0x11, 0x47, 0xE8,
0x3D, 0x19, 0x2F, 0xC7, 0x82, 0xCD, 0x1B, 0x47,
0x53, 0x11, 0x1B, 0x17, 0x3B, 0x3B, 0x05, 0xD2,
0x2F, 0xA0, 0x80, 0x86, 0xE3, 0xB0, 0xF7, 0x12,
0xFC, 0xC7, 0xC7, 0x1A, 0x55, 0x7E, 0x2D, 0xB9,
0x66, 0xC3, 0xE9, 0xFA, 0x91, 0x74, 0x60, 0x39
}
};
const uint8_t sha512_test_digests[][64] = {
{
/* for test_msg0 */
0xDD, 0xAF, 0x35, 0xA1, 0x93, 0x61, 0x7A, 0xBA,
0xCC, 0x41, 0x73, 0x49, 0xAE, 0x20, 0x41, 0x31,
0x12, 0xE6, 0xFA, 0x4E, 0x89, 0xA9, 0x7E, 0xA2,
0x0A, 0x9E, 0xEE, 0xE6, 0x4B, 0x55, 0xD3, 0x9A,
0x21, 0x92, 0x99, 0x2A, 0x27, 0x4F, 0xC1, 0xA8,
0x36, 0xBA, 0x3C, 0x23, 0xA3, 0xFE, 0xEB, 0xBD,
0x45, 0x4D, 0x44, 0x23, 0x64, 0x3C, 0xE8, 0x0E,
0x2A, 0x9A, 0xC9, 0x4F, 0xA5, 0x4C, 0xA4, 0x9F
},
{
/* no test vector for test_msg1 */
0
},
{
/* for test_msg2 */
0x8E, 0x95, 0x9B, 0x75, 0xDA, 0xE3, 0x13, 0xDA,
0x8C, 0xF4, 0xF7, 0x28, 0x14, 0xFC, 0x14, 0x3F,
0x8F, 0x77, 0x79, 0xC6, 0xEB, 0x9F, 0x7F, 0xA1,
0x72, 0x99, 0xAE, 0xAD, 0xB6, 0x88, 0x90, 0x18,
0x50, 0x1D, 0x28, 0x9E, 0x49, 0x00, 0xF7, 0xE4,
0x33, 0x1B, 0x99, 0xDE, 0xC4, 0xB5, 0x43, 0x3A,
0xC7, 0xD3, 0x29, 0xEE, 0xB6, 0xDD, 0x26, 0x54,
0x5E, 0x96, 0xE5, 0x5B, 0x87, 0x4B, 0xE9, 0x09
}
};
const uint8_t sha512_224_test_digests[][28] = {
{
/* for test_msg0 */
0x46, 0x34, 0x27, 0x0F, 0x70, 0x7B, 0x6A, 0x54,
0xDA, 0xAE, 0x75, 0x30, 0x46, 0x08, 0x42, 0xE2,
0x0E, 0x37, 0xED, 0x26, 0x5C, 0xEE, 0xE9, 0xA4,
0x3E, 0x89, 0x24, 0xAA
},
{
/* no test vector for test_msg1 */
0
},
{
/* for test_msg2 */
0x23, 0xFE, 0xC5, 0xBB, 0x94, 0xD6, 0x0B, 0x23,
0x30, 0x81, 0x92, 0x64, 0x0B, 0x0C, 0x45, 0x33,
0x35, 0xD6, 0x64, 0x73, 0x4F, 0xE4, 0x0E, 0x72,
0x68, 0x67, 0x4A, 0xF9
}
};
const uint8_t sha512_256_test_digests[][32] = {
{
/* for test_msg0 */
0x53, 0x04, 0x8E, 0x26, 0x81, 0x94, 0x1E, 0xF9,
0x9B, 0x2E, 0x29, 0xB7, 0x6B, 0x4C, 0x7D, 0xAB,
0xE4, 0xC2, 0xD0, 0xC6, 0x34, 0xFC, 0x6D, 0x46,
0xE0, 0xE2, 0xF1, 0x31, 0x07, 0xE7, 0xAF, 0x23
},
{
/* no test vector for test_msg1 */
0
},
{
/* for test_msg2 */
0x39, 0x28, 0xE1, 0x84, 0xFB, 0x86, 0x90, 0xF8,
0x40, 0xDA, 0x39, 0x88, 0x12, 0x1D, 0x31, 0xBE,
0x65, 0xCB, 0x9D, 0x3E, 0xF8, 0x3E, 0xE6, 0x14,
0x6F, 0xEA, 0xC8, 0x61, 0xE1, 0x9B, 0x56, 0x3A
}
};
int
main(int argc, char *argv[])
{
boolean_t failed = B_FALSE;
uint64_t cpu_mhz = 0;
if (argc == 2)
cpu_mhz = atoi(argv[1]);
#define SHA2_ALGO_TEST(_m, mode, diglen, testdigest) \
do { \
SHA2_CTX ctx; \
uint8_t digest[diglen / 8]; \
SHA2Init(SHA ## mode ## _MECH_INFO_TYPE, &ctx); \
SHA2Update(&ctx, _m, strlen(_m)); \
SHA2Final(digest, &ctx); \
(void) printf("SHA%-9sMessage: " #_m \
"\tResult: ", #mode); \
if (bcmp(digest, testdigest, diglen / 8) == 0) { \
(void) printf("OK\n"); \
} else { \
(void) printf("FAILED!\n"); \
failed = B_TRUE; \
} \
- NOTE(CONSTCOND) \
} while (0)
#define SHA2_PERF_TEST(mode, diglen) \
do { \
SHA2_CTX ctx; \
uint8_t digest[diglen / 8]; \
uint8_t block[131072]; \
uint64_t delta; \
double cpb = 0; \
int i; \
struct timeval start, end; \
bzero(block, sizeof (block)); \
(void) gettimeofday(&start, NULL); \
SHA2Init(SHA ## mode ## _MECH_INFO_TYPE, &ctx); \
for (i = 0; i < 8192; i++) \
SHA2Update(&ctx, block, sizeof (block)); \
SHA2Final(digest, &ctx); \
(void) gettimeofday(&end, NULL); \
delta = (end.tv_sec * 1000000llu + end.tv_usec) - \
(start.tv_sec * 1000000llu + start.tv_usec); \
if (cpu_mhz != 0) { \
cpb = (cpu_mhz * 1e6 * ((double)delta / \
1000000)) / (8192 * 128 * 1024); \
} \
(void) printf("SHA%-9s%llu us (%.02f CPB)\n", #mode, \
(u_longlong_t)delta, cpb); \
- NOTE(CONSTCOND) \
} while (0)
(void) printf("Running algorithm correctness tests:\n");
SHA2_ALGO_TEST(test_msg0, 256, 256, sha256_test_digests[0]);
SHA2_ALGO_TEST(test_msg1, 256, 256, sha256_test_digests[1]);
SHA2_ALGO_TEST(test_msg0, 384, 384, sha384_test_digests[0]);
SHA2_ALGO_TEST(test_msg2, 384, 384, sha384_test_digests[2]);
SHA2_ALGO_TEST(test_msg0, 512, 512, sha512_test_digests[0]);
SHA2_ALGO_TEST(test_msg2, 512, 512, sha512_test_digests[2]);
SHA2_ALGO_TEST(test_msg0, 512_224, 224, sha512_224_test_digests[0]);
SHA2_ALGO_TEST(test_msg2, 512_224, 224, sha512_224_test_digests[2]);
SHA2_ALGO_TEST(test_msg0, 512_256, 256, sha512_256_test_digests[0]);
SHA2_ALGO_TEST(test_msg2, 512_256, 256, sha512_256_test_digests[2]);
if (failed)
return (1);
(void) printf("Running performance tests (hashing 1024 MiB of "
"data):\n");
SHA2_PERF_TEST(256, 256);
SHA2_PERF_TEST(512, 512);
return (0);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/skein_test.c b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/skein_test.c
index 55df9075c66b..484fad844b73 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/skein_test.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/skein_test.c
@@ -1,340 +1,337 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
/*
* This is just to keep the compiler happy about sys/time.h not declaring
* gettimeofday due to -D_KERNEL (we can do this since we're actually
* running in userspace, but we need -D_KERNEL for the remaining Skein code).
*/
#ifdef _KERNEL
#undef _KERNEL
#endif
#include <sys/skein.h>
#include <stdlib.h>
#include <strings.h>
#include <stdio.h>
#include <sys/time.h>
#include <sys/stdtypes.h>
-#define NOTE(x)
/*
* Skein test suite using values from the Skein V1.3 specification found at:
* http://www.skein-hash.info/sites/default/files/skein1.3.pdf
*/
/*
* Test messages from the Skein spec, Appendix C.
*/
const uint8_t test_msg0[] = {
0xFF
};
const uint8_t test_msg1[] = {
0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8,
0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0,
0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8,
0xE7, 0xE6, 0xE5, 0xE4, 0xE3, 0xE2, 0xE1, 0xE0
};
const uint8_t test_msg2[] = {
0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8,
0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0,
0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8,
0xE7, 0xE6, 0xE5, 0xE4, 0xE3, 0xE2, 0xE1, 0xE0,
0xDF, 0xDE, 0xDD, 0xDC, 0xDB, 0xDA, 0xD9, 0xD8,
0xD7, 0xD6, 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xD0,
0xCF, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA, 0xC9, 0xC8,
0xC7, 0xC6, 0xC5, 0xC4, 0xC3, 0xC2, 0xC1, 0xC0
};
const uint8_t test_msg3[] = {
0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8,
0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0,
0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8,
0xE7, 0xE6, 0xE5, 0xE4, 0xE3, 0xE2, 0xE1, 0xE0,
0xDF, 0xDE, 0xDD, 0xDC, 0xDB, 0xDA, 0xD9, 0xD8,
0xD7, 0xD6, 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xD0,
0xCF, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA, 0xC9, 0xC8,
0xC7, 0xC6, 0xC5, 0xC4, 0xC3, 0xC2, 0xC1, 0xC0,
0xBF, 0xBE, 0xBD, 0xBC, 0xBB, 0xBA, 0xB9, 0xB8,
0xB7, 0xB6, 0xB5, 0xB4, 0xB3, 0xB2, 0xB1, 0xB0,
0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8,
0xA7, 0xA6, 0xA5, 0xA4, 0xA3, 0xA2, 0xA1, 0xA0,
0x9F, 0x9E, 0x9D, 0x9C, 0x9B, 0x9A, 0x99, 0x98,
0x97, 0x96, 0x95, 0x94, 0x93, 0x92, 0x91, 0x90,
0x8F, 0x8E, 0x8D, 0x8C, 0x8B, 0x8A, 0x89, 0x88,
0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80
};
const uint8_t test_msg4[] = {
0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8,
0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0,
0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8,
0xE7, 0xE6, 0xE5, 0xE4, 0xE3, 0xE2, 0xE1, 0xE0,
0xDF, 0xDE, 0xDD, 0xDC, 0xDB, 0xDA, 0xD9, 0xD8,
0xD7, 0xD6, 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xD0,
0xCF, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA, 0xC9, 0xC8,
0xC7, 0xC6, 0xC5, 0xC4, 0xC3, 0xC2, 0xC1, 0xC0,
0xBF, 0xBE, 0xBD, 0xBC, 0xBB, 0xBA, 0xB9, 0xB8,
0xB7, 0xB6, 0xB5, 0xB4, 0xB3, 0xB2, 0xB1, 0xB0,
0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8,
0xA7, 0xA6, 0xA5, 0xA4, 0xA3, 0xA2, 0xA1, 0xA0,
0x9F, 0x9E, 0x9D, 0x9C, 0x9B, 0x9A, 0x99, 0x98,
0x97, 0x96, 0x95, 0x94, 0x93, 0x92, 0x91, 0x90,
0x8F, 0x8E, 0x8D, 0x8C, 0x8B, 0x8A, 0x89, 0x88,
0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80,
0x7F, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A, 0x79, 0x78,
0x77, 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x70,
0x6F, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A, 0x69, 0x68,
0x67, 0x66, 0x65, 0x64, 0x63, 0x62, 0x61, 0x60,
0x5F, 0x5E, 0x5D, 0x5C, 0x5B, 0x5A, 0x59, 0x58,
0x57, 0x56, 0x55, 0x54, 0x53, 0x52, 0x51, 0x50,
0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 0x48,
0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40,
0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38,
0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30,
0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28,
0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18,
0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
};
/*
* Test digests from the Skein spec, Appendix C.
*/
const uint8_t skein_256_test_digests[][32] = {
{
/* for test_msg0 */
0x0B, 0x98, 0xDC, 0xD1, 0x98, 0xEA, 0x0E, 0x50,
0xA7, 0xA2, 0x44, 0xC4, 0x44, 0xE2, 0x5C, 0x23,
0xDA, 0x30, 0xC1, 0x0F, 0xC9, 0xA1, 0xF2, 0x70,
0xA6, 0x63, 0x7F, 0x1F, 0x34, 0xE6, 0x7E, 0xD2
},
{
/* for test_msg1 */
0x8D, 0x0F, 0xA4, 0xEF, 0x77, 0x7F, 0xD7, 0x59,
0xDF, 0xD4, 0x04, 0x4E, 0x6F, 0x6A, 0x5A, 0xC3,
0xC7, 0x74, 0xAE, 0xC9, 0x43, 0xDC, 0xFC, 0x07,
0x92, 0x7B, 0x72, 0x3B, 0x5D, 0xBF, 0x40, 0x8B
},
{
/* for test_msg2 */
0xDF, 0x28, 0xE9, 0x16, 0x63, 0x0D, 0x0B, 0x44,
0xC4, 0xA8, 0x49, 0xDC, 0x9A, 0x02, 0xF0, 0x7A,
0x07, 0xCB, 0x30, 0xF7, 0x32, 0x31, 0x82, 0x56,
0xB1, 0x5D, 0x86, 0x5A, 0xC4, 0xAE, 0x16, 0x2F
}
/* no test digests for test_msg3 and test_msg4 */
};
const uint8_t skein_512_test_digests[][64] = {
{
/* for test_msg0 */
0x71, 0xB7, 0xBC, 0xE6, 0xFE, 0x64, 0x52, 0x22,
0x7B, 0x9C, 0xED, 0x60, 0x14, 0x24, 0x9E, 0x5B,
0xF9, 0xA9, 0x75, 0x4C, 0x3A, 0xD6, 0x18, 0xCC,
0xC4, 0xE0, 0xAA, 0xE1, 0x6B, 0x31, 0x6C, 0xC8,
0xCA, 0x69, 0x8D, 0x86, 0x43, 0x07, 0xED, 0x3E,
0x80, 0xB6, 0xEF, 0x15, 0x70, 0x81, 0x2A, 0xC5,
0x27, 0x2D, 0xC4, 0x09, 0xB5, 0xA0, 0x12, 0xDF,
0x2A, 0x57, 0x91, 0x02, 0xF3, 0x40, 0x61, 0x7A
},
{
/* no test vector for test_msg1 */
0,
},
{
/* for test_msg2 */
0x45, 0x86, 0x3B, 0xA3, 0xBE, 0x0C, 0x4D, 0xFC,
0x27, 0xE7, 0x5D, 0x35, 0x84, 0x96, 0xF4, 0xAC,
0x9A, 0x73, 0x6A, 0x50, 0x5D, 0x93, 0x13, 0xB4,
0x2B, 0x2F, 0x5E, 0xAD, 0xA7, 0x9F, 0xC1, 0x7F,
0x63, 0x86, 0x1E, 0x94, 0x7A, 0xFB, 0x1D, 0x05,
0x6A, 0xA1, 0x99, 0x57, 0x5A, 0xD3, 0xF8, 0xC9,
0xA3, 0xCC, 0x17, 0x80, 0xB5, 0xE5, 0xFA, 0x4C,
0xAE, 0x05, 0x0E, 0x98, 0x98, 0x76, 0x62, 0x5B
},
{
/* for test_msg3 */
0x91, 0xCC, 0xA5, 0x10, 0xC2, 0x63, 0xC4, 0xDD,
0xD0, 0x10, 0x53, 0x0A, 0x33, 0x07, 0x33, 0x09,
0x62, 0x86, 0x31, 0xF3, 0x08, 0x74, 0x7E, 0x1B,
0xCB, 0xAA, 0x90, 0xE4, 0x51, 0xCA, 0xB9, 0x2E,
0x51, 0x88, 0x08, 0x7A, 0xF4, 0x18, 0x87, 0x73,
0xA3, 0x32, 0x30, 0x3E, 0x66, 0x67, 0xA7, 0xA2,
0x10, 0x85, 0x6F, 0x74, 0x21, 0x39, 0x00, 0x00,
0x71, 0xF4, 0x8E, 0x8B, 0xA2, 0xA5, 0xAD, 0xB7
}
/* no test digests for test_msg4 */
};
const uint8_t skein_1024_test_digests[][128] = {
{
/* for test_msg0 */
0xE6, 0x2C, 0x05, 0x80, 0x2E, 0xA0, 0x15, 0x24,
0x07, 0xCD, 0xD8, 0x78, 0x7F, 0xDA, 0x9E, 0x35,
0x70, 0x3D, 0xE8, 0x62, 0xA4, 0xFB, 0xC1, 0x19,
0xCF, 0xF8, 0x59, 0x0A, 0xFE, 0x79, 0x25, 0x0B,
0xCC, 0xC8, 0xB3, 0xFA, 0xF1, 0xBD, 0x24, 0x22,
0xAB, 0x5C, 0x0D, 0x26, 0x3F, 0xB2, 0xF8, 0xAF,
0xB3, 0xF7, 0x96, 0xF0, 0x48, 0x00, 0x03, 0x81,
0x53, 0x1B, 0x6F, 0x00, 0xD8, 0x51, 0x61, 0xBC,
0x0F, 0xFF, 0x4B, 0xEF, 0x24, 0x86, 0xB1, 0xEB,
0xCD, 0x37, 0x73, 0xFA, 0xBF, 0x50, 0xAD, 0x4A,
0xD5, 0x63, 0x9A, 0xF9, 0x04, 0x0E, 0x3F, 0x29,
0xC6, 0xC9, 0x31, 0x30, 0x1B, 0xF7, 0x98, 0x32,
0xE9, 0xDA, 0x09, 0x85, 0x7E, 0x83, 0x1E, 0x82,
0xEF, 0x8B, 0x46, 0x91, 0xC2, 0x35, 0x65, 0x65,
0x15, 0xD4, 0x37, 0xD2, 0xBD, 0xA3, 0x3B, 0xCE,
0xC0, 0x01, 0xC6, 0x7F, 0xFD, 0xE1, 0x5B, 0xA8
},
{
/* no test vector for test_msg1 */
0
},
{
/* no test vector for test_msg2 */
0
},
{
/* for test_msg3 */
0x1F, 0x3E, 0x02, 0xC4, 0x6F, 0xB8, 0x0A, 0x3F,
0xCD, 0x2D, 0xFB, 0xBC, 0x7C, 0x17, 0x38, 0x00,
0xB4, 0x0C, 0x60, 0xC2, 0x35, 0x4A, 0xF5, 0x51,
0x18, 0x9E, 0xBF, 0x43, 0x3C, 0x3D, 0x85, 0xF9,
0xFF, 0x18, 0x03, 0xE6, 0xD9, 0x20, 0x49, 0x31,
0x79, 0xED, 0x7A, 0xE7, 0xFC, 0xE6, 0x9C, 0x35,
0x81, 0xA5, 0xA2, 0xF8, 0x2D, 0x3E, 0x0C, 0x7A,
0x29, 0x55, 0x74, 0xD0, 0xCD, 0x7D, 0x21, 0x7C,
0x48, 0x4D, 0x2F, 0x63, 0x13, 0xD5, 0x9A, 0x77,
0x18, 0xEA, 0xD0, 0x7D, 0x07, 0x29, 0xC2, 0x48,
0x51, 0xD7, 0xE7, 0xD2, 0x49, 0x1B, 0x90, 0x2D,
0x48, 0x91, 0x94, 0xE6, 0xB7, 0xD3, 0x69, 0xDB,
0x0A, 0xB7, 0xAA, 0x10, 0x6F, 0x0E, 0xE0, 0xA3,
0x9A, 0x42, 0xEF, 0xC5, 0x4F, 0x18, 0xD9, 0x37,
0x76, 0x08, 0x09, 0x85, 0xF9, 0x07, 0x57, 0x4F,
0x99, 0x5E, 0xC6, 0xA3, 0x71, 0x53, 0xA5, 0x78
},
{
/* for test_msg4 */
0x84, 0x2A, 0x53, 0xC9, 0x9C, 0x12, 0xB0, 0xCF,
0x80, 0xCF, 0x69, 0x49, 0x1B, 0xE5, 0xE2, 0xF7,
0x51, 0x5D, 0xE8, 0x73, 0x3B, 0x6E, 0xA9, 0x42,
0x2D, 0xFD, 0x67, 0x66, 0x65, 0xB5, 0xFA, 0x42,
0xFF, 0xB3, 0xA9, 0xC4, 0x8C, 0x21, 0x77, 0x77,
0x95, 0x08, 0x48, 0xCE, 0xCD, 0xB4, 0x8F, 0x64,
0x0F, 0x81, 0xFB, 0x92, 0xBE, 0xF6, 0xF8, 0x8F,
0x7A, 0x85, 0xC1, 0xF7, 0xCD, 0x14, 0x46, 0xC9,
0x16, 0x1C, 0x0A, 0xFE, 0x8F, 0x25, 0xAE, 0x44,
0x4F, 0x40, 0xD3, 0x68, 0x00, 0x81, 0xC3, 0x5A,
0xA4, 0x3F, 0x64, 0x0F, 0xD5, 0xFA, 0x3C, 0x3C,
0x03, 0x0B, 0xCC, 0x06, 0xAB, 0xAC, 0x01, 0xD0,
0x98, 0xBC, 0xC9, 0x84, 0xEB, 0xD8, 0x32, 0x27,
0x12, 0x92, 0x1E, 0x00, 0xB1, 0xBA, 0x07, 0xD6,
0xD0, 0x1F, 0x26, 0x90, 0x70, 0x50, 0x25, 0x5E,
0xF2, 0xC8, 0xE2, 0x4F, 0x71, 0x6C, 0x52, 0xA5
}
};
int
main(int argc, char *argv[])
{
boolean_t failed = B_FALSE;
uint64_t cpu_mhz = 0;
if (argc == 2)
cpu_mhz = atoi(argv[1]);
#define SKEIN_ALGO_TEST(_m, mode, diglen, testdigest) \
do { \
Skein ## mode ## _Ctxt_t ctx; \
uint8_t digest[diglen / 8]; \
(void) Skein ## mode ## _Init(&ctx, diglen); \
(void) Skein ## mode ## _Update(&ctx, _m, sizeof (_m)); \
(void) Skein ## mode ## _Final(&ctx, digest); \
(void) printf("Skein" #mode "/" #diglen \
"\tMessage: " #_m "\tResult: "); \
if (bcmp(digest, testdigest, diglen / 8) == 0) { \
(void) printf("OK\n"); \
} else { \
(void) printf("FAILED!\n"); \
failed = B_TRUE; \
} \
- NOTE(CONSTCOND) \
} while (0)
#define SKEIN_PERF_TEST(mode, diglen) \
do { \
Skein ## mode ## _Ctxt_t ctx; \
uint8_t digest[diglen / 8]; \
uint8_t block[131072]; \
uint64_t delta; \
double cpb = 0; \
int i; \
struct timeval start, end; \
bzero(block, sizeof (block)); \
(void) gettimeofday(&start, NULL); \
(void) Skein ## mode ## _Init(&ctx, diglen); \
for (i = 0; i < 8192; i++) { \
(void) Skein ## mode ## _Update(&ctx, block, \
sizeof (block)); \
} \
(void) Skein ## mode ## _Final(&ctx, digest); \
(void) gettimeofday(&end, NULL); \
delta = (end.tv_sec * 1000000llu + end.tv_usec) - \
(start.tv_sec * 1000000llu + start.tv_usec); \
if (cpu_mhz != 0) { \
cpb = (cpu_mhz * 1e6 * ((double)delta / \
1000000)) / (8192 * 128 * 1024); \
} \
(void) printf("Skein" #mode "/" #diglen "\t%llu us " \
"(%.02f CPB)\n", (u_longlong_t)delta, cpb); \
- NOTE(CONSTCOND) \
} while (0)
(void) printf("Running algorithm correctness tests:\n");
SKEIN_ALGO_TEST(test_msg0, _256, 256, skein_256_test_digests[0]);
SKEIN_ALGO_TEST(test_msg1, _256, 256, skein_256_test_digests[1]);
SKEIN_ALGO_TEST(test_msg2, _256, 256, skein_256_test_digests[2]);
SKEIN_ALGO_TEST(test_msg0, _512, 512, skein_512_test_digests[0]);
SKEIN_ALGO_TEST(test_msg2, _512, 512, skein_512_test_digests[2]);
SKEIN_ALGO_TEST(test_msg3, _512, 512, skein_512_test_digests[3]);
SKEIN_ALGO_TEST(test_msg0, 1024, 1024, skein_1024_test_digests[0]);
SKEIN_ALGO_TEST(test_msg3, 1024, 1024, skein_1024_test_digests[3]);
SKEIN_ALGO_TEST(test_msg4, 1024, 1024, skein_1024_test_digests[4]);
if (failed)
return (1);
(void) printf("Running performance tests (hashing 1024 MiB of "
"data):\n");
SKEIN_PERF_TEST(_256, 256);
SKEIN_PERF_TEST(_512, 512);
SKEIN_PERF_TEST(1024, 1024);
return (0);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/Makefile.am
index 9baf580eeadb..09f4c1d0d74f 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/Makefile.am
@@ -1,18 +1,15 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/l2arc
dist_pkgdata_SCRIPTS = \
cleanup.ksh \
setup.ksh \
l2arc_arcstats_pos.ksh \
l2arc_l2miss_pos.ksh \
l2arc_mfuonly_pos.ksh \
persist_l2arc_001_pos.ksh \
persist_l2arc_002_pos.ksh \
persist_l2arc_003_neg.ksh \
persist_l2arc_004_pos.ksh \
- persist_l2arc_005_pos.ksh \
- persist_l2arc_006_pos.ksh \
- persist_l2arc_007_pos.ksh \
- persist_l2arc_008_pos.ksh
+ persist_l2arc_005_pos.ksh
dist_pkgdata_DATA = \
l2arc.cfg
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/l2arc_arcstats_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/l2arc_arcstats_pos.ksh
index 24fcefadfd07..3e76347b029a 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/l2arc_arcstats_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/l2arc_arcstats_pos.ksh
@@ -1,107 +1,106 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2020, George Amanakis. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/l2arc/l2arc.cfg
#
# DESCRIPTION:
# L2ARC MFU/MRU arcstats do not leak
#
# STRATEGY:
# 1. Create pool with a cache device.
# 2. Create a random file in that pool, smaller than the cache device
# and random read for 10 sec.
# 3. Read l2arc_mfu_asize and l2arc_mru_asize
# 4. Export pool.
# 5. Verify l2arc_mfu_asize and l2arc_mru_asize are 0.
# 6. Import pool.
# 7. Random read for 10 sec.
# 8. Read l2arc_mfu_asize and l2arc_mru_asize
# 9. Verify that L2ARC MFU increased and MFU+MRU = L2_asize.
#
verify_runnable "global"
log_assert "L2ARC MFU/MRU arcstats do not leak."
function cleanup
{
if poolexists $TESTPOOL ; then
destroy_pool $TESTPOOL
fi
log_must set_tunable32 L2ARC_NOPREFETCH $noprefetch
}
log_onexit cleanup
# L2ARC_NOPREFETCH is set to 0 to let L2ARC handle prefetches
typeset noprefetch=$(get_tunable L2ARC_NOPREFETCH)
log_must set_tunable32 L2ARC_NOPREFETCH 0
typeset fill_mb=800
typeset cache_sz=$(( 1.4 * $fill_mb ))
export FILE_SIZE=$(( floor($fill_mb / $NUMJOBS) ))M
log_must truncate -s ${cache_sz}M $VDEV_CACHE
log_must zpool create -f $TESTPOOL $VDEV cache $VDEV_CACHE
log_must fio $FIO_SCRIPTS/mkfiles.fio
log_must fio $FIO_SCRIPTS/random_reads.fio
arcstat_quiescence_noecho l2_size
log_must zpool offline $TESTPOOL $VDEV_CACHE
arcstat_quiescence_noecho l2_size
typeset l2_mfu_init=$(get_arcstat l2_mfu_asize)
typeset l2_mru_init=$(get_arcstat l2_mru_asize)
typeset l2_prefetch_init=$(get_arcstat l2_prefetch_asize)
typeset l2_asize_init=$(get_arcstat l2_asize)
log_must zpool online $TESTPOOL $VDEV_CACHE
arcstat_quiescence_noecho l2_size
log_must zpool export $TESTPOOL
arcstat_quiescence_noecho l2_feeds
log_must test $(get_arcstat l2_mfu_asize) -eq 0
log_must test $(get_arcstat l2_mru_asize) -eq 0
log_must zpool import -d $VDIR $TESTPOOL
arcstat_quiescence_noecho l2_size
log_must fio $FIO_SCRIPTS/random_reads.fio
arcstat_quiescence_noecho l2_size
log_must zpool offline $TESTPOOL $VDEV_CACHE
arcstat_quiescence_noecho l2_size
typeset l2_mfu_end=$(get_arcstat l2_mfu_asize)
typeset l2_mru_end=$(get_arcstat l2_mru_asize)
typeset l2_prefetch_end=$(get_arcstat l2_prefetch_asize)
typeset l2_asize_end=$(get_arcstat l2_asize)
-log_must test $(( $l2_mfu_end - $l2_mfu_init )) -gt 0
log_must test $(( $l2_mru_end + $l2_mfu_end + $l2_prefetch_end - \
$l2_asize_end )) -eq 0
log_must test $(( $l2_mru_init + $l2_mfu_init + $l2_prefetch_init - \
$l2_asize_init )) -eq 0
log_must zpool destroy -f $TESTPOOL
log_pass "L2ARC MFU/MRU arcstats do not leak."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_004_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_004_pos.ksh
index 544e9291de29..b40703180687 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_004_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_004_pos.ksh
@@ -1,102 +1,101 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2020, George Amanakis. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/l2arc/l2arc.cfg
#
# DESCRIPTION:
-# Persistent L2ARC restores all written log blocks
+# Off/onlining an L2ARC device results in rebuilding L2ARC, vdev not
+# present.
#
# STRATEGY:
# 1. Create pool with a cache device.
-# 2. Create a random file in that pool, smaller than the cache device
-# and random read for 10 sec.
-# 3. Export pool.
-# 4. Read amount of log blocks written.
-# 5. Import pool.
-# 6. Read amount of log blocks built.
-# 7. Compare the two amounts.
-# 8. Read the file written in (2) and check if l2_hits in
-# /proc/spl/kstat/zfs/arcstats increased.
-# 9. Check if the labels of the L2ARC device are intact.
+# 2. Create a random file in that pool and random read for 10 sec.
+# 3. Read the amount of log blocks written from the header of the
+# L2ARC device.
+# 4. Offline the L2ARC device and export pool.
+# 5. Import pool and online the L2ARC device.
+# 6. Read the amount of log blocks rebuilt in arcstats and compare to
+# (3).
+# 7. Check if the labels of the L2ARC device are intact.
#
verify_runnable "global"
-log_assert "Persistent L2ARC restores all written log blocks."
+log_assert "Off/onlining an L2ARC device results in rebuilding L2ARC, vdev not present."
function cleanup
{
if poolexists $TESTPOOL ; then
destroy_pool $TESTPOOL
fi
log_must set_tunable32 L2ARC_NOPREFETCH $noprefetch
+ log_must set_tunable32 L2ARC_REBUILD_BLOCKS_MIN_L2SIZE \
+ $rebuild_blocks_min_l2size
}
log_onexit cleanup
# L2ARC_NOPREFETCH is set to 0 to let L2ARC handle prefetches
typeset noprefetch=$(get_tunable L2ARC_NOPREFETCH)
+typeset rebuild_blocks_min_l2size=$(get_tunable L2ARC_REBUILD_BLOCKS_MIN_L2SIZE)
log_must set_tunable32 L2ARC_NOPREFETCH 0
+log_must set_tunable32 L2ARC_REBUILD_BLOCKS_MIN_L2SIZE 0
typeset fill_mb=800
-typeset cache_sz=$(( 2 * $fill_mb ))
+typeset cache_sz=$(( floor($fill_mb / 2) ))
export FILE_SIZE=$(( floor($fill_mb / $NUMJOBS) ))M
log_must truncate -s ${cache_sz}M $VDEV_CACHE
-typeset log_blk_start=$(get_arcstat l2_log_blk_writes)
-
log_must zpool create -f $TESTPOOL $VDEV cache $VDEV_CACHE
log_must fio $FIO_SCRIPTS/mkfiles.fio
log_must fio $FIO_SCRIPTS/random_reads.fio
+arcstat_quiescence_noecho l2_size
+log_must zpool offline $TESTPOOL $VDEV_CACHE
arcstat_quiescence_noecho l2_size
log_must zpool export $TESTPOOL
arcstat_quiescence_noecho l2_feeds
-typeset log_blk_end=$(get_arcstat l2_log_blk_writes)
-typeset log_blk_rebuild_start=$(get_arcstat l2_rebuild_log_blks)
+typeset l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks)
+typeset l2_dh_log_blk=$(zdb -l $VDEV_CACHE | grep log_blk_count | \
+ awk '{print $2}')
log_must zpool import -d $VDIR $TESTPOOL
-
-typeset l2_hits_start=$(get_arcstat l2_hits)
-
-log_must fio $FIO_SCRIPTS/random_reads.fio
+log_must zpool online $TESTPOOL $VDEV_CACHE
arcstat_quiescence_noecho l2_size
-typeset log_blk_rebuild_end=$(arcstat_quiescence_echo l2_rebuild_log_blks)
-typeset l2_hits_end=$(get_arcstat l2_hits)
-
-log_must test $(( $log_blk_rebuild_end - $log_blk_rebuild_start )) -eq \
- $(( $log_blk_end - $log_blk_start ))
+typeset l2_rebuild_log_blk_end=$(arcstat_quiescence_echo l2_rebuild_log_blks)
-log_must test $l2_hits_end -gt $l2_hits_start
+log_must test $l2_dh_log_blk -eq $(( $l2_rebuild_log_blk_end - \
+ $l2_rebuild_log_blk_start ))
+log_must test $l2_dh_log_blk -gt 0
log_must zpool offline $TESTPOOL $VDEV_CACHE
arcstat_quiescence_noecho l2_size
log_must zdb -lq $VDEV_CACHE
log_must zpool destroy -f $TESTPOOL
-log_pass "Persistent L2ARC restores all written log blocks."
+log_pass "Off/onlining an L2ARC device results in rebuilding L2ARC, vdev not present."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_005_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_005_pos.ksh
index ee46e7b8cad6..8ad648519f5c 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_005_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_005_pos.ksh
@@ -1,109 +1,102 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2020, George Amanakis. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/l2arc/l2arc.cfg
-. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
#
# DESCRIPTION:
-# Persistent L2ARC restores all written log blocks with encryption
+# Off/onlining an L2ARC device results in rebuilding L2ARC, vdev present.
#
# STRATEGY:
# 1. Create pool with a cache device.
-# 2. Create an encrypted ZFS file system.
-# 3. Create a random file in the encrypted file system,
-# smaller than the cache device, and random read for 10 sec.
-# 4. Export pool.
-# 5. Read amount of log blocks written.
-# 6. Import pool.
-# 7. Mount the encrypted ZFS file system.
-# 8. Read amount of log blocks built.
-# 9. Compare the two amounts.
-# 10. Read the file written in (3) and check if l2_hits in
-# /proc/spl/kstat/zfs/arcstats increased.
-# 11. Check if the labels of the L2ARC device are intact.
+# 2. Create a random file in that pool and random read for 10 sec.
+# 3. Offline the L2ARC device.
+# 4. Read the amount of log blocks written from the header of the
+# L2ARC device.
+# 5. Online the L2ARC device.
+# 6. Read the amount of log blocks rebuilt in arcstats and compare to
+# (4).
+# 7. Check if the labels of the L2ARC device are intact.
#
verify_runnable "global"
-log_assert "Persistent L2ARC restores all written log blocks with encryption."
+log_assert "Off/onlining an L2ARC device results in rebuilding L2ARC, vdev present."
function cleanup
{
if poolexists $TESTPOOL ; then
destroy_pool $TESTPOOL
fi
log_must set_tunable32 L2ARC_NOPREFETCH $noprefetch
+ log_must set_tunable32 L2ARC_REBUILD_BLOCKS_MIN_L2SIZE \
+ $rebuild_blocks_min_l2size
}
log_onexit cleanup
# L2ARC_NOPREFETCH is set to 0 to let L2ARC handle prefetches
typeset noprefetch=$(get_tunable L2ARC_NOPREFETCH)
+typeset rebuild_blocks_min_l2size=$(get_tunable L2ARC_REBUILD_BLOCKS_MIN_L2SIZE)
log_must set_tunable32 L2ARC_NOPREFETCH 0
+log_must set_tunable32 L2ARC_REBUILD_BLOCKS_MIN_L2SIZE 0
typeset fill_mb=800
-typeset cache_sz=$(( 2 * $fill_mb ))
+typeset cache_sz=$(( floor($fill_mb / 2) ))
export FILE_SIZE=$(( floor($fill_mb / $NUMJOBS) ))M
log_must truncate -s ${cache_sz}M $VDEV_CACHE
-typeset log_blk_start=$(get_arcstat l2_log_blk_writes)
-
log_must zpool create -f $TESTPOOL $VDEV cache $VDEV_CACHE
-log_must eval "echo $PASSPHRASE | zfs create -o encryption=on" \
- "-o keyformat=passphrase $TESTPOOL/$TESTFS1"
-
log_must fio $FIO_SCRIPTS/mkfiles.fio
log_must fio $FIO_SCRIPTS/random_reads.fio
arcstat_quiescence_noecho l2_size
-log_must zpool export $TESTPOOL
-arcstat_quiescence_noecho l2_feeds
-
-typeset log_blk_end=$(get_arcstat l2_log_blk_writes)
-typeset log_blk_rebuild_start=$(get_arcstat l2_rebuild_log_blks)
-
-log_must zpool import -d $VDIR $TESTPOOL
-log_must eval "echo $PASSPHRASE | zfs mount -l $TESTPOOL/$TESTFS1"
+log_must zpool offline $TESTPOOL $VDEV_CACHE
+arcstat_quiescence_noecho l2_size
-typeset l2_hits_start=$(get_arcstat l2_hits)
+typeset l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks)
+typeset l2_dh_log_blk=$(zdb -l $VDEV_CACHE | grep log_blk_count | \
+ awk '{print $2}')
-log_must fio $FIO_SCRIPTS/random_reads.fio
+log_must zpool online $TESTPOOL $VDEV_CACHE
arcstat_quiescence_noecho l2_size
-typeset log_blk_rebuild_end=$(arcstat_quiescence_echo l2_rebuild_log_blks)
-typeset l2_hits_end=$(get_arcstat l2_hits)
-
-log_must test $(( $log_blk_rebuild_end - $log_blk_rebuild_start )) -eq \
- $(( $log_blk_end - $log_blk_start ))
+typeset l2_rebuild_log_blk_end=$(arcstat_quiescence_echo l2_rebuild_log_blks)
-log_must test $l2_hits_end -gt $l2_hits_start
+# Upon onlining the cache device we might write additional blocks to it
+# before it is marked for rebuild as the l2ad_* parameters are not cleared
+# when offlining the device. See comment in l2arc_rebuild_vdev().
+# So we cannot compare the amount of rebuilt log blocks to the amount of log
+# blocks read from the header of the device.
+log_must test $(( $l2_rebuild_log_blk_end - \
+ $l2_rebuild_log_blk_start )) -gt 0
+log_must test $l2_dh_log_blk -gt 0
log_must zpool offline $TESTPOOL $VDEV_CACHE
arcstat_quiescence_noecho l2_size
log_must zdb -lq $VDEV_CACHE
log_must zpool destroy -f $TESTPOOL
-log_pass "Persistent L2ARC restores all written log blocks with encryption."
+log_pass "Off/onlining an L2ARC device results in rebuilding L2ARC, vdev present."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_006_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_006_pos.ksh
deleted file mode 100755
index 051773540233..000000000000
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_006_pos.ksh
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/bin/ksh -p
-#
-# CDDL HEADER START
-#
-# This file and its contents are supplied under the terms of the
-# Common Development and Distribution License ("CDDL"), version 1.0.
-# You may only use this file in accordance with the terms of version
-# 1.0 of the CDDL.
-#
-# A full copy of the text of the CDDL should have accompanied this
-# source. A copy of the CDDL is also available via the Internet at
-# http://www.illumos.org/license/CDDL.
-#
-# CDDL HEADER END
-#
-
-#
-# Copyright (c) 2020, George Amanakis. All rights reserved.
-#
-
-. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/l2arc/l2arc.cfg
-
-#
-# DESCRIPTION:
-# Off/onlining an L2ARC device results in rebuilding L2ARC, vdev not
-# present.
-#
-# STRATEGY:
-# 1. Create pool with a cache device.
-# 2. Create a random file in that pool and random read for 10 sec.
-# 3. Read the amount of log blocks written from the header of the
-# L2ARC device.
-# 4. Offline the L2ARC device and export pool.
-# 5. Import pool and online the L2ARC device.
-# 6. Read the amount of log blocks rebuilt in arcstats and compare to
-# (3).
-# 7. Check if the labels of the L2ARC device are intact.
-#
-
-verify_runnable "global"
-
-log_assert "Off/onlining an L2ARC device results in rebuilding L2ARC, vdev not present."
-
-function cleanup
-{
- if poolexists $TESTPOOL ; then
- destroy_pool $TESTPOOL
- fi
-
- log_must set_tunable32 L2ARC_NOPREFETCH $noprefetch
- log_must set_tunable32 L2ARC_REBUILD_BLOCKS_MIN_L2SIZE \
- $rebuild_blocks_min_l2size
-}
-log_onexit cleanup
-
-# L2ARC_NOPREFETCH is set to 0 to let L2ARC handle prefetches
-typeset noprefetch=$(get_tunable L2ARC_NOPREFETCH)
-typeset rebuild_blocks_min_l2size=$(get_tunable L2ARC_REBUILD_BLOCKS_MIN_L2SIZE)
-log_must set_tunable32 L2ARC_NOPREFETCH 0
-log_must set_tunable32 L2ARC_REBUILD_BLOCKS_MIN_L2SIZE 0
-
-typeset fill_mb=800
-typeset cache_sz=$(( floor($fill_mb / 2) ))
-export FILE_SIZE=$(( floor($fill_mb / $NUMJOBS) ))M
-
-log_must truncate -s ${cache_sz}M $VDEV_CACHE
-
-log_must zpool create -f $TESTPOOL $VDEV cache $VDEV_CACHE
-
-log_must fio $FIO_SCRIPTS/mkfiles.fio
-log_must fio $FIO_SCRIPTS/random_reads.fio
-
-arcstat_quiescence_noecho l2_size
-log_must zpool offline $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-log_must zpool export $TESTPOOL
-arcstat_quiescence_noecho l2_feeds
-
-typeset l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks)
-typeset l2_dh_log_blk=$(zdb -l $VDEV_CACHE | grep log_blk_count | \
- awk '{print $2}')
-
-log_must zpool import -d $VDIR $TESTPOOL
-log_must zpool online $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-
-typeset l2_rebuild_log_blk_end=$(arcstat_quiescence_echo l2_rebuild_log_blks)
-
-log_must test $l2_dh_log_blk -eq $(( $l2_rebuild_log_blk_end - \
- $l2_rebuild_log_blk_start ))
-log_must test $l2_dh_log_blk -gt 0
-
-log_must zpool offline $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-
-log_must zdb -lq $VDEV_CACHE
-
-log_must zpool destroy -f $TESTPOOL
-
-log_pass "Off/onlining an L2ARC device results in rebuilding L2ARC, vdev not present."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_007_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_007_pos.ksh
deleted file mode 100755
index 9208b81d4905..000000000000
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_007_pos.ksh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/ksh -p
-#
-# CDDL HEADER START
-#
-# This file and its contents are supplied under the terms of the
-# Common Development and Distribution License ("CDDL"), version 1.0.
-# You may only use this file in accordance with the terms of version
-# 1.0 of the CDDL.
-#
-# A full copy of the text of the CDDL should have accompanied this
-# source. A copy of the CDDL is also available via the Internet at
-# http://www.illumos.org/license/CDDL.
-#
-# CDDL HEADER END
-#
-
-#
-# Copyright (c) 2020, George Amanakis. All rights reserved.
-#
-
-. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/l2arc/l2arc.cfg
-
-#
-# DESCRIPTION:
-# Off/onlining an L2ARC device results in rebuilding L2ARC, vdev present.
-#
-# STRATEGY:
-# 1. Create pool with a cache device.
-# 2. Create a random file in that pool and random read for 10 sec.
-# 3. Offline the L2ARC device.
-# 4. Read the amount of log blocks written from the header of the
-# L2ARC device.
-# 5. Online the L2ARC device.
-# 6. Read the amount of log blocks rebuilt in arcstats and compare to
-# (4).
-# 7. Check if the labels of the L2ARC device are intact.
-#
-
-verify_runnable "global"
-
-log_assert "Off/onlining an L2ARC device results in rebuilding L2ARC, vdev present."
-
-function cleanup
-{
- if poolexists $TESTPOOL ; then
- destroy_pool $TESTPOOL
- fi
-
- log_must set_tunable32 L2ARC_NOPREFETCH $noprefetch
- log_must set_tunable32 L2ARC_REBUILD_BLOCKS_MIN_L2SIZE \
- $rebuild_blocks_min_l2size
-}
-log_onexit cleanup
-
-# L2ARC_NOPREFETCH is set to 0 to let L2ARC handle prefetches
-typeset noprefetch=$(get_tunable L2ARC_NOPREFETCH)
-typeset rebuild_blocks_min_l2size=$(get_tunable L2ARC_REBUILD_BLOCKS_MIN_L2SIZE)
-log_must set_tunable32 L2ARC_NOPREFETCH 0
-log_must set_tunable32 L2ARC_REBUILD_BLOCKS_MIN_L2SIZE 0
-
-typeset fill_mb=800
-typeset cache_sz=$(( floor($fill_mb / 2) ))
-export FILE_SIZE=$(( floor($fill_mb / $NUMJOBS) ))M
-
-log_must truncate -s ${cache_sz}M $VDEV_CACHE
-
-log_must zpool create -f $TESTPOOL $VDEV cache $VDEV_CACHE
-
-log_must fio $FIO_SCRIPTS/mkfiles.fio
-log_must fio $FIO_SCRIPTS/random_reads.fio
-
-arcstat_quiescence_noecho l2_size
-log_must zpool offline $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-
-typeset l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks)
-typeset l2_dh_log_blk=$(zdb -l $VDEV_CACHE | grep log_blk_count | \
- awk '{print $2}')
-
-log_must zpool online $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-
-typeset l2_rebuild_log_blk_end=$(arcstat_quiescence_echo l2_rebuild_log_blks)
-
-log_must test $l2_dh_log_blk -eq $(( $l2_rebuild_log_blk_end - \
- $l2_rebuild_log_blk_start ))
-log_must test $l2_dh_log_blk -gt 0
-
-log_must zpool offline $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-
-log_must zdb -lq $VDEV_CACHE
-
-log_must zpool destroy -f $TESTPOOL
-
-log_pass "Off/onlining an L2ARC device results in rebuilding L2ARC, vdev present."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_008_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_008_pos.ksh
deleted file mode 100755
index 5a79ff31ba7e..000000000000
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_008_pos.ksh
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/bin/ksh -p
-#
-# CDDL HEADER START
-#
-# This file and its contents are supplied under the terms of the
-# Common Development and Distribution License ("CDDL"), version 1.0.
-# You may only use this file in accordance with the terms of version
-# 1.0 of the CDDL.
-#
-# A full copy of the text of the CDDL should have accompanied this
-# source. A copy of the CDDL is also available via the Internet at
-# http://www.illumos.org/license/CDDL.
-#
-# CDDL HEADER END
-#
-
-#
-# Copyright (c) 2020, George Amanakis. All rights reserved.
-#
-
-. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/l2arc/l2arc.cfg
-
-#
-# DESCRIPTION:
-# Off/onlining an L2ARC device restores all written blocks, vdev present.
-#
-# STRATEGY:
-# 1. Create pool with a cache device.
-# 2. Create a random file in that pool and random read for 10 sec.
-# 3. Read the amount of log blocks written from the header of the
-# L2ARC device.
-# 4. Offline the L2ARC device.
-# 5. Online the L2ARC device.
-# 6. Read the amount of log blocks rebuilt in arcstats and compare to
-# (3).
-# 7. Create another random file in that pool and random read for 10 sec.
-# 8. Read the amount of log blocks written from the header of the
-# L2ARC device.
-# 9. Offline the L2ARC device.
-# 10. Online the L2ARC device.
-# 11. Read the amount of log blocks rebuilt in arcstats and compare to
-# (8).
-# 12. Check if the amount of log blocks on the cache device has
-# increased.
-# 13. Export the pool.
-# 14. Read the amount of log blocks on the cache device.
-# 15. Import the pool.
-# 16. Read the amount of log blocks rebuilt in arcstats and compare to
-# (14).
-# 17. Check if the labels of the L2ARC device are intact.
-#
-
-verify_runnable "global"
-
-log_assert "Off/onlining an L2ARC device restores all written blocks , vdev present."
-
-function cleanup
-{
- if poolexists $TESTPOOL ; then
- destroy_pool $TESTPOOL
- fi
-
- log_must set_tunable32 L2ARC_NOPREFETCH $noprefetch
-}
-log_onexit cleanup
-
-# L2ARC_NOPREFETCH is set to 0 to let L2ARC handle prefetches
-typeset noprefetch=$(get_tunable L2ARC_NOPREFETCH)
-log_must set_tunable32 L2ARC_NOPREFETCH 0
-
-typeset fill_mb=400
-typeset cache_sz=$(( 3 * $fill_mb ))
-export FILE_SIZE=$(( floor($fill_mb / $NUMJOBS) ))M
-
-log_must truncate -s ${cache_sz}M $VDEV_CACHE
-
-log_must zpool create -f $TESTPOOL $VDEV cache $VDEV_CACHE
-
-log_must fio $FIO_SCRIPTS/mkfiles.fio
-log_must fio $FIO_SCRIPTS/random_reads.fio
-
-arcstat_quiescence_noecho l2_size
-log_must zpool offline $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-
-typeset l2_dh_log_blk1=$(zdb -l $VDEV_CACHE | grep log_blk_count | \
- awk '{print $2}')
-typeset l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks)
-
-log_must zpool online $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-
-typeset l2_rebuild_log_blk_end=$(arcstat_quiescence_echo l2_rebuild_log_blks)
-
-log_must test $l2_dh_log_blk1 -eq $(( $l2_rebuild_log_blk_end - \
- $l2_rebuild_log_blk_start ))
-log_must test $l2_dh_log_blk1 -gt 0
-
-log_must fio $FIO_SCRIPTS/mkfiles.fio
-log_must fio $FIO_SCRIPTS/random_reads.fio
-
-arcstat_quiescence_noecho l2_size
-log_must zpool offline $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-
-typeset l2_dh_log_blk2=$(zdb -l $VDEV_CACHE | grep log_blk_count | \
- awk '{print $2}')
-typeset l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks)
-
-log_must zpool online $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-
-typeset l2_rebuild_log_blk_end=$(arcstat_quiescence_echo l2_rebuild_log_blks)
-
-log_must test $l2_dh_log_blk2 -eq $(( $l2_rebuild_log_blk_end - \
- $l2_rebuild_log_blk_start ))
-log_must test $l2_dh_log_blk2 -gt $l2_dh_log_blk1
-
-log_must zpool export $TESTPOOL
-arcstat_quiescence_noecho l2_feeds
-
-typeset l2_dh_log_blk3=$(zdb -l $VDEV_CACHE | grep log_blk_count | \
- awk '{print $2}')
-typeset l2_rebuild_log_blk_start=$(get_arcstat l2_rebuild_log_blks)
-
-log_must zpool import -d $VDIR $TESTPOOL
-arcstat_quiescence_noecho l2_size
-
-typeset l2_rebuild_log_blk_end=$(arcstat_quiescence_echo l2_rebuild_log_blks)
-
-log_must test $l2_dh_log_blk3 -eq $(( $l2_rebuild_log_blk_end - \
- $l2_rebuild_log_blk_start ))
-log_must test $l2_dh_log_blk3 -gt 0
-
-log_must zpool offline $TESTPOOL $VDEV_CACHE
-arcstat_quiescence_noecho l2_size
-
-log_must zdb -lq $VDEV_CACHE
-
-log_must zpool destroy -f $TESTPOOL
-
-log_pass "Off/onlining an L2ARC device restores all written blocks, vdev present."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/perf.shlib b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/perf.shlib
index 6addd46610c2..6f4fdc94348f 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/perf.shlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/perf.shlib
@@ -1,575 +1,602 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
# Copyright (c) 2016, Intel Corporation.
#
. $STF_SUITE/include/libtest.shlib
-# If neither is specified, do a nightly run.
-[[ -z $PERF_REGRESSION_WEEKLY ]] && export PERF_REGRESSION_NIGHTLY=1
-
-# Default runtime for each type of test run.
-export PERF_RUNTIME_WEEKLY=$((30 * 60))
-export PERF_RUNTIME_NIGHTLY=$((10 * 60))
+# Defaults common to all the tests in the regression group
+export PERF_RUNTIME=${PERF_RUNTIME:-'180'}
+export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
+export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
+export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
+export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
# Default to JSON for fio output
export PERF_FIO_FORMAT=${PERF_FIO_FORMAT:-'json'}
# Default fs creation options
export PERF_FS_OPTS=${PERF_FS_OPTS:-'-o recsize=8k -o compress=lz4' \
' -o checksum=sha256 -o redundant_metadata=most'}
function get_sync_str
{
typeset sync=$1
typeset sync_str=''
[[ $sync -eq 0 ]] && sync_str='async'
[[ $sync -eq 1 ]] && sync_str='sync'
echo $sync_str
}
function get_suffix
{
typeset threads=$1
typeset sync=$2
typeset iosize=$3
typeset sync_str=$(get_sync_str $sync)
typeset filesystems=$(get_nfilesystems)
typeset suffix="$sync_str.$iosize-ios"
suffix="$suffix.$threads-threads.$filesystems-filesystems"
echo $suffix
}
function do_fio_run_impl
{
typeset script=$1
typeset do_recreate=$2
typeset clear_cache=$3
typeset threads=$4
typeset threads_per_fs=$5
typeset sync=$6
typeset iosize=$7
typeset sync_str=$(get_sync_str $sync)
log_note "Running with $threads $sync_str threads, $iosize ios"
if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then
log_must test $do_recreate
verify_threads_per_fs $threads $threads_per_fs
fi
if $do_recreate; then
recreate_perf_pool
#
# A value of zero for "threads_per_fs" is "special", and
# means a single filesystem should be used, regardless
# of the number of threads.
#
if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then
populate_perf_filesystems $((threads / threads_per_fs))
else
populate_perf_filesystems 1
fi
fi
if $clear_cache; then
# Clear the ARC
- zpool export $PERFPOOL
- zpool import $PERFPOOL
+ log_must zinject -a
fi
if [[ -n $ZINJECT_DELAYS ]]; then
apply_zinject_delays
else
log_note "No per-device commands to execute."
fi
#
# Allow this to be overridden by the individual test case. This
# can be used to run the FIO job against something other than
# the default filesystem (e.g. against a clone).
#
export DIRECTORY=$(get_directory)
log_note "DIRECTORY: " $DIRECTORY
export RUNTIME=$PERF_RUNTIME
export RANDSEED=$PERF_RANDSEED
export COMPPERCENT=$PERF_COMPPERCENT
export COMPCHUNK=$PERF_COMPCHUNK
export FILESIZE=$((TOTAL_SIZE / threads))
export NUMJOBS=$threads
export SYNC_TYPE=$sync
export BLOCKSIZE=$iosize
sync
# When running locally, we want to keep the default behavior of
# DIRECT == 0, so only set it when we're running over NFS to
# disable client cache for reads.
if [[ $NFS -eq 1 ]]; then
export DIRECT=1
do_setup_nfs $script
else
export DIRECT=0
fi
# This will be part of the output filename.
typeset suffix=$(get_suffix $threads $sync $iosize)
# Start the data collection
do_collect_scripts $suffix
# Define output file
typeset logbase="$(get_perf_output_dir)/$(basename \
$SUDO_COMMAND)"
typeset outfile="$logbase.fio.$suffix"
# Start the load
if [[ $NFS -eq 1 ]]; then
log_must ssh -t $NFS_USER@$NFS_CLIENT "
fio --output-format=${PERF_FIO_FORMAT} \
--output /tmp/fio.out /tmp/test.fio
"
log_must scp $NFS_USER@$NFS_CLIENT:/tmp/fio.out $outfile
log_must ssh -t $NFS_USER@$NFS_CLIENT "sudo -S umount $NFS_MOUNT"
else
log_must fio --output-format=${PERF_FIO_FORMAT} \
--output $outfile $FIO_SCRIPTS/$script
fi
}
#
# This function will run fio in a loop, according to the .fio file passed
# in and a number of environment variables. The following variables can be
# set before launching zfstest to override the defaults.
#
# PERF_RUNTIME: The time in seconds each fio invocation should run.
-# PERF_RUNTYPE: A human readable tag that appears in logs. The defaults are
-# nightly and weekly.
# PERF_NTHREADS: A list of how many threads each fio invocation will use.
# PERF_SYNC_TYPES: Whether to use (O_SYNC) or not. 1 is sync IO, 0 is async IO.
# PERF_IOSIZES: A list of blocksizes in which each fio invocation will do IO.
# PERF_COLLECT_SCRIPTS: A comma delimited list of 'command args, logfile_tag'
# pairs that will be added to the scripts specified in each test.
#
function do_fio_run
{
typeset script=$1
typeset do_recreate=$2
typeset clear_cache=$3
typeset threads threads_per_fs sync iosize
for threads in $PERF_NTHREADS; do
for threads_per_fs in $PERF_NTHREADS_PER_FS; do
for sync in $PERF_SYNC_TYPES; do
for iosize in $PERF_IOSIZES; do
do_fio_run_impl \
$script \
$do_recreate \
$clear_cache \
$threads \
$threads_per_fs \
$sync \
$iosize
done
done
done
done
}
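As a hedged usage sketch of the loop above (assuming the usual perf test environment, i.e. PERFPOOL, FIO_SCRIPTS and TOTAL_SIZE are already set up as the regression scripts below do), the run matrix is driven purely by the PERF_* lists.
# Sketch only: iterate fio over two thread counts and two IO sizes,
# clearing the ARC (but not recreating the pool) before each run.
export PERF_NTHREADS='8 16'
export PERF_NTHREADS_PER_FS='0'
export PERF_SYNC_TYPES='1'
export PERF_IOSIZES='8k 64k'
do_fio_run random_reads.fio false true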
# This function sets up the NFS mount on the client and makes sure all the
# correct permissions are in place.
#
function do_setup_nfs
{
typeset script=$1
zfs set sharenfs=on $TESTFS
log_must chmod -R 777 /$TESTFS
ssh -t $NFS_USER@$NFS_CLIENT "mkdir -m 777 -p $NFS_MOUNT"
ssh -t $NFS_USER@$NFS_CLIENT "sudo -S umount $NFS_MOUNT"
log_must ssh -t $NFS_USER@$NFS_CLIENT "
sudo -S mount $NFS_OPTIONS $NFS_SERVER:/$TESTFS $NFS_MOUNT
"
#
# The variables in the fio script are only available in our current
# shell session, so we have to evaluate them here before copying
# the resulting script over to the target machine.
#
export jobnum='$jobnum'
while read line; do
eval echo "$line"
done < $FIO_SCRIPTS/$script > /tmp/test.fio
log_must sed -i -e "s%directory.*%directory=$NFS_MOUNT%" /tmp/test.fio
log_must scp /tmp/test.fio $NFS_USER@$NFS_CLIENT:/tmp
log_must rm /tmp/test.fio
}
#
# This function iterates through the value pairs in $PERF_COLLECT_SCRIPTS.
# The script at index N is launched in the background, with its output
# redirected to a logfile containing the tag specified at index N + 1.
#
function do_collect_scripts
{
typeset suffix=$1
[[ -n $collect_scripts ]] || log_fail "No data collection scripts."
[[ -n $PERF_RUNTIME ]] || log_fail "No runtime specified."
# Add in user supplied scripts and logfiles, if any.
typeset oIFS=$IFS
IFS=','
for item in $PERF_COLLECT_SCRIPTS; do
collect_scripts+=($(echo $item | sed 's/^ *//g'))
done
IFS=$oIFS
typeset idx=0
while [[ $idx -lt "${#collect_scripts[@]}" ]]; do
typeset logbase="$(get_perf_output_dir)/$(basename \
$SUDO_COMMAND)"
typeset outfile="$logbase.${collect_scripts[$idx + 1]}.$suffix"
timeout $PERF_RUNTIME ${collect_scripts[$idx]} >$outfile 2>&1 &
((idx += 2))
done
# Need to explicitly return 0 because timeout(1) will kill
# a child process and cause us to return non-zero.
return 0
}
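If extra collectors are wanted, the comma-delimited format documented above can be exercised as follows; arcstat and dstat are only example commands and are assumed to be available on the test host.
# Hypothetical value: two extra 'command args, logfile_tag' pairs appended to
# the per-test collect_scripts array by the IFS=',' loop above.
export PERF_COLLECT_SCRIPTS="arcstat 1, arcstat, dstat 1, dstat"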
# Find a place to deposit performance data collected while under load.
function get_perf_output_dir
{
typeset dir="$(pwd)/perf_data"
[[ -d $dir ]] || mkdir -p $dir
echo $dir
}
function apply_zinject_delays
{
typeset idx=0
while [[ $idx -lt "${#ZINJECT_DELAYS[@]}" ]]; do
[[ -n ${ZINJECT_DELAYS[$idx]} ]] || \
log_must "No zinject delay found at index: $idx"
for disk in $DISKS; do
log_must zinject \
-d $disk -D ${ZINJECT_DELAYS[$idx]} $PERFPOOL
done
((idx += 1))
done
}
function clear_zinject_delays
{
log_must zinject -c all
}
#
# Destroy and create the pool used for performance tests.
#
function recreate_perf_pool
{
[[ -n $PERFPOOL ]] || log_fail "The \$PERFPOOL variable isn't set."
#
# In case there's been some "leaked" zinject delays, or if the
# performance test injected some delays itself, we clear all
# delays before attempting to destroy the pool. Each delay
# places a hold on the pool, so the destroy will fail if there
# are any outstanding delays.
#
clear_zinject_delays
#
# This function handles the case where the pool already exists,
# and will destroy the previous pool and recreate a new pool.
#
create_pool $PERFPOOL $DISKS
}
function verify_threads_per_fs
{
typeset threads=$1
typeset threads_per_fs=$2
log_must test -n $threads
log_must test -n $threads_per_fs
#
# A value of "0" is treated as a "special value", and it is
# interpreted to mean all threads will run using a single
# filesystem.
#
[[ $threads_per_fs -eq 0 ]] && return
#
# The number of threads per filesystem must be a value greater
# than or equal to zero; since we just verified the value isn't
# 0 above, then it must be greater than zero here.
#
log_must test $threads_per_fs -ge 0
#
# This restriction can be lifted later if needed, but for now,
# we restrict the number of threads per filesystem to a value
# that evenly divides the thread count. This way, the threads
# will be evenly distributed over all the filesystems.
#
log_must test $((threads % threads_per_fs)) -eq 0
}
function populate_perf_filesystems
{
typeset nfilesystems=${1:-1}
export TESTFS=""
for i in $(seq 1 $nfilesystems); do
typeset dataset="$PERFPOOL/fs$i"
create_dataset $dataset $PERF_FS_OPTS
if [[ -z "$TESTFS" ]]; then
TESTFS="$dataset"
else
TESTFS="$TESTFS $dataset"
fi
done
}
function get_nfilesystems
{
typeset filesystems=( $TESTFS )
echo ${#filesystems[@]}
}
function get_directory
{
typeset filesystems=( $TESTFS )
typeset directory=
typeset idx=0
while [[ $idx -lt "${#filesystems[@]}" ]]; do
mountpoint=$(get_prop mountpoint "${filesystems[$idx]}")
if [[ -n $directory ]]; then
directory=$directory:$mountpoint
else
directory=$mountpoint
fi
((idx += 1))
done
echo $directory
}
function get_min_arc_size
{
typeset -l min_arc_size
if is_freebsd; then
min_arc_size=$(sysctl -n kstat.zfs.misc.arcstats.c_min)
elif is_illumos; then
min_arc_size=$(dtrace -qn 'BEGIN {
printf("%u\n", `arc_stats.arcstat_c_min.value.ui64);
exit(0);
}')
elif is_linux; then
min_arc_size=`awk '$1 == "c_min" { print $3 }' \
/proc/spl/kstat/zfs/arcstats`
fi
[[ $? -eq 0 ]] || log_fail "get_min_arc_size failed"
echo $min_arc_size
}
function get_max_arc_size
{
typeset -l max_arc_size
if is_freebsd; then
max_arc_size=$(sysctl -n kstat.zfs.misc.arcstats.c_max)
elif is_illumos; then
max_arc_size=$(dtrace -qn 'BEGIN {
printf("%u\n", `arc_stats.arcstat_c_max.value.ui64);
exit(0);
}')
elif is_linux; then
max_arc_size=`awk '$1 == "c_max" { print $3 }' \
/proc/spl/kstat/zfs/arcstats`
fi
[[ $? -eq 0 ]] || log_fail "get_max_arc_size failed"
echo $max_arc_size
}
-function get_max_dbuf_cache_size
+function get_arc_target
{
- typeset -l max_dbuf_cache_size
+ typeset -l arc_c
+
+ if is_freebsd; then
+ arc_c=$(sysctl -n kstat.zfs.misc.arcstats.c)
+ elif is_illumos; then
+ arc_c=$(dtrace -qn 'BEGIN {
+ printf("%u\n", `arc_stats.arcstat_c.value.ui64);
+ exit(0);
+ }')
+ elif is_linux; then
+ arc_c=`awk '$1 == "c" { print $3 }' \
+ /proc/spl/kstat/zfs/arcstats`
+ fi
+
+ [[ $? -eq 0 ]] || log_fail "get_arc_target failed"
+
+ echo $arc_c
+}
+
+function get_dbuf_cache_size
+{
+ typeset -l dbuf_cache_size dbuf_cache_shift
if is_illumos; then
- max_dbuf_cache_size=$(dtrace -qn 'BEGIN {
+ dbuf_cache_size=$(dtrace -qn 'BEGIN {
printf("%u\n", `dbuf_cache_max_bytes);
exit(0);
}')
else
- max_dbuf_cache_size=$(get_tunable DBUF_CACHE_MAX_BYTES)
+ dbuf_cache_shift=$(get_tunable DBUF_CACHE_SHIFT)
+ dbuf_cache_size=$(($(get_arc_target) / 2**dbuf_cache_shift))
fi
- [[ $? -eq 0 ]] || log_fail "get_max_dbuf_cache_size failed"
+ [[ $? -eq 0 ]] || log_fail "get_dbuf_cache_size failed"
- echo $max_dbuf_cache_size
+ echo $dbuf_cache_size
}
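A quick arithmetic check of the replacement calculation, using made-up numbers: an ARC target of 4 GiB and a DBUF_CACHE_SHIFT of 5 (believed to be the default) give a 128 MiB dbuf cache.
# Illustration only: 4 GiB >> 5 = 128 MiB of dbuf cache.
arc_c=$(( 4 * 1024 * 1024 * 1024 ))
dbuf_cache_shift=5
echo $(( arc_c / 2**dbuf_cache_shift ))	# 134217728 bytes = 128 MiB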
# Create a file with some information about how this system is configured.
function get_system_config
{
typeset config=$PERF_DATA_DIR/$1
echo "{" >>$config
if is_linux; then
echo " \"ncpus\": \"$(nproc --all)\"," >>$config
echo " \"physmem\": \"$(free -b | \
awk '$1 == "Mem:" { print $2 }')\"," >>$config
echo " \"c_max\": \"$(get_max_arc_size)\"," >>$config
echo " \"hostname\": \"$(uname -n)\"," >>$config
echo " \"kernel version\": \"$(uname -sr)\"," >>$config
else
dtrace -qn 'BEGIN{
printf(" \"ncpus\": %d,\n", `ncpus);
printf(" \"physmem\": %u,\n", `physmem * `_pagesize);
printf(" \"c_max\": %u,\n", `arc_stats.arcstat_c_max.value.ui64);
printf(" \"kmem_flags\": \"0x%x\",", `kmem_flags);
exit(0)}' >>$config
echo " \"hostname\": \"$(uname -n)\"," >>$config
echo " \"kernel version\": \"$(uname -v)\"," >>$config
fi
if is_linux; then
lsblk -dino NAME,SIZE | awk 'BEGIN {
printf(" \"disks\": {\n"); first = 1}
{disk = $1} {size = $2;
if (first != 1) {printf(",\n")} else {first = 0}
printf(" \"%s\": \"%s\"", disk, size)}
END {printf("\n },\n")}' >>$config
zfs_tunables="/sys/module/zfs/parameters"
printf " \"tunables\": {\n" >>$config
for tunable in \
zfs_arc_max \
zfs_arc_meta_limit \
zfs_arc_sys_free \
zfs_dirty_data_max \
zfs_flags \
zfs_prefetch_disable \
zfs_txg_timeout \
zfs_vdev_aggregation_limit \
zfs_vdev_async_read_max_active \
zfs_vdev_async_write_max_active \
zfs_vdev_sync_read_max_active \
zfs_vdev_sync_write_max_active \
zio_slow_io_ms
do
if [ "$tunable" != "zfs_arc_max" ]
then
printf ",\n" >>$config
fi
printf " \"$tunable\": \"$(<$zfs_tunables/$tunable)\"" \
>>$config
done
printf "\n }\n" >>$config
else
iostat -En | awk 'BEGIN {
printf(" \"disks\": {\n"); first = 1}
/^c/ {disk = $1}
/^Size: [^0]/ {size = $2;
if (first != 1) {printf(",\n")} else {first = 0}
printf(" \"%s\": \"%s\"", disk, size)}
END {printf("\n },\n")}' >>$config
sed -n 's/^set \(.*\)[ ]=[ ]\(.*\)/\1=\2/p' /etc/system | \
awk -F= 'BEGIN {printf(" \"system\": {\n"); first = 1}
{if (first != 1) {printf(",\n")} else {first = 0};
printf(" \"%s\": %s", $1, $2)}
END {printf("\n }\n")}' >>$config
fi
echo "}" >>$config
}
function num_jobs_by_cpu
{
if is_linux; then
typeset ncpu=$($NPROC --all)
else
typeset ncpu=$(psrinfo | $WC -l)
fi
typeset num_jobs=$ncpu
[[ $ncpu -gt 8 ]] && num_jobs=$(echo "$ncpu * 3 / 4" | bc)
echo $num_jobs
}
#
# On illumos this looks like: ":sd3:sd4:sd1:sd2:"
#
function pool_to_lun_list
{
typeset pool=$1
typeset ctd ctds devname lun
typeset lun_list=':'
if is_illumos; then
ctds=$(zpool list -v $pool |
awk '/c[0-9]*t[0-9a-fA-F]*d[0-9]*/ {print $1}')
for ctd in $ctds; do
# Get the device name as it appears in /etc/path_to_inst
devname=$(readlink -f /dev/dsk/${ctd}s0 | sed -n \
's/\/devices\([^:]*\):.*/\1/p')
# Add a string composed of the driver name and instance
# number to the list for comparison with dev_statname.
lun=$(sed 's/"//g' /etc/path_to_inst | grep \
$devname | awk '{print $3$2}')
lun_list="$lun_list$lun:"
done
elif is_freebsd; then
lun_list+=$(zpool list -HLv $pool | \
awk '/a?da[0-9]+|md[0-9]+|mfid[0-9]+|nda[0-9]+|nvd[0-9]+|vtbd[0-9]+/
{ printf "%s:", $1 }')
elif is_linux; then
ctds=$(zpool list -HLv $pool | \
awk '/sd[a-z]*|loop[0-9]*|dm-[0-9]*/ {print $1}')
for ctd in $ctds; do
lun_list="$lun_list$ctd:"
done
fi
echo $lun_list
}
+function print_perf_settings
+{
+ echo "PERF_NTHREADS: $PERF_NTHREADS"
+ echo "PERF_NTHREADS_PER_FS: $PERF_NTHREADS_PER_FS"
+ echo "PERF_SYNC_TYPES: $PERF_SYNC_TYPES"
+ echo "PERF_IOSIZES: $PERF_IOSIZES"
+}
+
# Create a perf_data directory to hold performance statistics and
# configuration information.
export PERF_DATA_DIR=$(get_perf_output_dir)
[[ -f $PERF_DATA_DIR/config.json ]] || get_system_config config.json
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_reads.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_reads.ksh
index e6d207e22747..5c8066d17549 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_reads.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_reads.ksh
@@ -1,114 +1,96 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the random_reads job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read from are created prior to the first fio run, and used
# for all fio runs. The ARC is cleared with `zinject -a` prior to each run
# so reads will go to disk.
#
# Thread/Concurrency settings:
# PERF_NTHREADS defines the number of files created in the test filesystem,
# as well as the number of threads that will simultaneously drive IO to
# those files. The settings chosen are from measurements in the
# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
# are at peak throughput but lowest latency. Higher concurrency introduces
# queue time latency and would reduce the impact of code-induced performance
# regressions.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
recreate_perf_pool
}
trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
log_onexit cleanup
recreate_perf_pool
populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
# Layout the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
typeset perf_record_cmd="perf record -F 99 -a -g -q \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"vmstat -t 1" "vmstat"
"mpstat -P ALL 1" "mpstat"
"iostat -tdxyz 1" "iostat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
)
fi
-log_note "Random reads with $PERF_RUNTYPE settings"
+log_note "Random reads with settings: $(print_perf_settings)"
do_fio_run random_reads.fio false true
log_pass "Measure IO stats during random read load"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
index 573e9c7d4c58..33d7d8c8d945 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
@@ -1,114 +1,96 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the random_readwrite job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read and write from are created prior to the first fio run,
# and used for all fio runs. The ARC is cleared with `zinject -a` prior to
# each run so reads will go to disk.
#
# Thread/Concurrency settings:
# PERF_NTHREADS defines the number of files created in the test filesystem,
# as well as the number of threads that will simultaneously drive IO to
# those files. The settings chosen are from measurements in the
# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
# are at peak throughput but lowest latency. Higher concurrency introduces
# queue time latency and would reduce the impact of code-induced performance
# regressions.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
recreate_perf_pool
}
trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
log_onexit cleanup
recreate_perf_pool
populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'4 8 16 64'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
- export PERF_IOSIZES='' # bssplit used instead
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'32 64'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES='' # bssplit used instead
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'32 64'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES='' # bssplit used instead
# Layout the files to be used by the readwrite tests. Create as many files
# as the largest number of threads. An fio run with fewer threads will use
# a subset of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
typeset perf_record_cmd="perf record -F 99 -a -g -q \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"vmstat -t 1" "vmstat"
"mpstat -P ALL 1" "mpstat"
"iostat -tdxyz 1" "iostat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
)
fi
-log_note "Random reads and writes with $PERF_RUNTYPE settings"
+log_note "Random reads and writes with settings: $(print_perf_settings)"
do_fio_run random_readwrite.fio false true
log_pass "Measure IO stats during random read and write load"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_readwrite_fixed.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_readwrite_fixed.ksh
index 78af5213a3d3..bb4014563f1f 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_readwrite_fixed.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_readwrite_fixed.ksh
@@ -1,106 +1,88 @@
#!/bin/ksh
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2017, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2017, 2021 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the random_readwrite_fixed job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read and write from are created prior to the first fio run,
# and used for all fio runs. The ARC is cleared with `zinject -a` prior to
# each run so reads will go to disk.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
recreate_perf_pool
}
trap "log_fail \"Measure IO stats during random read write load\"" SIGTERM
log_onexit cleanup
recreate_perf_pool
populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
- export PERF_IOSIZES='8k 64k'
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES='8k'
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
# Layout the files to be used by the readwrite tests. Create as many files
# as the largest number of threads. An fio run with fewer threads will use
# a subset of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
typeset perf_record_cmd="perf record -F 99 -a -g -q \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"vmstat -t 1" "vmstat"
"mpstat -P ALL 1" "mpstat"
"iostat -tdxyz 1" "iostat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"kstat zfs:0 1" "kstat"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
"dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"dtrace -s $PERF_SCRIPTS/profile.d" "profile"
)
fi
-log_note "Random reads and writes with $PERF_RUNTYPE settings"
+log_note "Random reads and writes with settings: $(print_perf_settings)"
do_fio_run random_readwrite_fixed.fio false true
log_pass "Measure IO stats during random read and write load"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_writes.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_writes.ksh
index dca013cbae0c..4b826835efbf 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_writes.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_writes.ksh
@@ -1,105 +1,87 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the random_writes job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# Prior to each fio run the dataset is recreated, and fio writes new files
# into an otherwise empty pool.
#
# Thread/Concurrency settings:
# PERF_NTHREADS defines the number of files created in the test filesystem,
# as well as the number of threads that will simultaneously drive IO to
# those files. The settings chosen are from measurements in the
# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
# are at peak throughput but lowest latency. Higher concurrency introduces
# queue time latency and would reduce the impact of code-induced performance
# regressions.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
recreate_perf_pool
}
trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
log_onexit cleanup
recreate_perf_pool
populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 256k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'32 128'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'32 128'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
typeset perf_record_cmd="perf record -F 99 -a -g -q \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"vmstat -t 1" "vmstat"
"mpstat -P ALL 1" "mpstat"
"iostat -tdxyz 1" "iostat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
)
fi
-log_note "Random writes with $PERF_RUNTYPE settings"
+log_note "Random writes with settings: $(print_perf_settings)"
do_fio_run random_writes.fio true false
log_pass "Measure IO stats during random write load"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh
index 5d4fd77a7458..522ee4526828 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh
@@ -1,100 +1,83 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
#
# We're using many filesystems depending on the number of
# threads for each test, and there's no good way to get a list
# of all the filesystems that should be destroyed on cleanup
# (i.e. the list of filesystems used for the last test ran).
# Thus, we simply recreate the pool as a way to destroy all
# filesystems and leave a fresh pool behind.
#
recreate_perf_pool
}
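# Illustrative sketch only: recreate_perf_pool comes from perf.shlib and is
# relied on above; the equivalent manual cleanup would look roughly like the
# following, assuming $DISKS names the test vdevs as it does in setup:
#
#   zpool destroy -f "$PERFPOOL"          # drops every child filesystem at once
#   zpool create -f "$PERFPOOL" $DISKS    # fresh, empty pool for the next run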
trap "log_fail \"Measure IO stats during random write load\"" SIGTERM
log_onexit cleanup
recreate_perf_pool
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'1 2 4 8 16 32 64 128'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
-
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 16 64'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 16 64'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
# Until the performance tests over NFS can deal with multiple file systems,
# force the use of only one file system when testing over NFS.
[[ $NFS -eq 1 ]] && PERF_NTHREADS_PER_FS='0'
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
typeset perf_record_cmd="perf record -F 99 -a -g -q \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"vmstat -t 1" "vmstat"
"mpstat -P ALL 1" "mpstat"
"iostat -tdxyz 1" "iostat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"kstat zfs:0 1" "kstat"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
"dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"dtrace -s $PERF_SCRIPTS/zil.d $PERFPOOL 1" "zil"
"dtrace -s $PERF_SCRIPTS/profile.d" "profile"
"dtrace -s $PERF_SCRIPTS/offcpu-profile.d" "offcpu-profile"
)
fi
-log_note "ZIL specific random write workload with $PERF_RUNTYPE settings"
+log_note \
+ "ZIL specific random write workload with settings: $(print_perf_settings)"
do_fio_run random_writes.fio true false
log_pass "Measure IO stats during ZIL specific random write workload"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
index e5cf6278391c..2bdfff736f4e 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
@@ -1,116 +1,98 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the sequential_reads job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read from are created prior to the first fio run, and used
# for all fio runs. The ARC is cleared with `zinject -a` prior to each run
# so reads will go to disk.
#
# Thread/Concurrency settings:
# PERF_NTHREADS defines the number of files created in the test filesystem,
# as well as the number of threads that will simultaneously drive IO to
# those files. The settings chosen are from measurements in the
# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
# are at peak throughput but lowest latency. Higher concurrency introduces
# queue time latency and would reduce the impact of code-induced performance
# regressions.
#
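# Illustrative aside: the ARC flush mentioned above is a single command, so a
# run loop that wants cold-cache reads can simply issue it before each fio
# invocation (where and how do_fio_run does this is not shown in this diff):
#
#   log_must zinject -a     # flush the ARC so the next reads hit the disks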
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
recreate_perf_pool
}
trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
log_onexit cleanup
recreate_perf_pool
populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
# Layout the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
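# Worked example (illustrative): with the defaults above, PERF_NTHREADS is
# '8 16', so get_max picks NUMJOBS=16 and mkfiles.fio lays out 16 files of
# TOTAL_SIZE/16 bytes each; an fio run with only 8 threads then reads a
# subset of those files.
#
#   echo $((TOTAL_SIZE / $(get_max $PERF_NTHREADS)))   # per-file size in bytes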
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
typeset perf_record_cmd="perf record -F 99 -a -g -q \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
"vmstat -t 1" "vmstat"
"mpstat -P ALL 1" "mpstat"
"iostat -tdxyz 1" "iostat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
)
fi
-log_note "Sequential reads with $PERF_RUNTYPE settings"
+log_note "Sequential reads with settings: $(print_perf_settings)"
do_fio_run sequential_reads.fio false true
log_pass "Measure IO stats during sequential read load"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
index d44e37f3eaaf..8127786361ba 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
@@ -1,106 +1,88 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the sequential_reads job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read from are created prior to the first fio run, and used
# for all fio runs. The ARC is not cleared to ensure that all data is cached.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
recreate_perf_pool
}
trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
log_onexit cleanup
recreate_perf_pool
populate_perf_filesystems
# Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
# Layout the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
typeset perf_record_cmd="perf record -F 99 -a -g -q \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
"vmstat -t 1" "vmstat"
"mpstat -P ALL 1" "mpstat"
"iostat -tdxyz 1" "iostat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
)
fi
-log_note "Sequential cached reads with $PERF_RUNTYPE settings"
+log_note "Sequential cached reads with settings: $(print_perf_settings)"
do_fio_run sequential_reads.fio false false
log_pass "Measure IO stats during sequential cached read load"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
index 1b3ee85ec55a..8ce1273c2869 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
@@ -1,132 +1,115 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the sequential_reads job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read from are created prior to the first fio run, and used
# for all fio runs. This test will exercise cached read performance from
# a clone filesystem. The data is initially cached in the ARC and then
# a snapshot and clone are created. All the performance runs are then
# initiated against the clone filesystem to exercise the performance of
# reads when the ARC has to create another buffer from a different dataset.
# It will also exercise the need to evict the duplicate buffer once the last
# reference on that buffer is released.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
recreate_perf_pool
}
trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
log_onexit cleanup
recreate_perf_pool
populate_perf_filesystems
# Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
# Layout the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
#
# Only a single filesystem is used by this test. To be defensive, we
# double check that TESTFS only contains a single filesystem. We
# wouldn't want to assume this was the case, and have it actually
# contain multiple filesystems (causing cascading failures later).
#
log_must test $(get_nfilesystems) -eq 1
log_note "Creating snapshot, $TESTSNAP, of $TESTFS"
create_snapshot $TESTFS $TESTSNAP
log_note "Creating clone, $PERFPOOL/$TESTCLONE, from $TESTFS@$TESTSNAP"
create_clone $TESTFS@$TESTSNAP $PERFPOOL/$TESTCLONE
#
# We want to run FIO against the clone we created above, and not the
# clone's originating filesystem. Thus, we override the default behavior
# and explicitly set TESTFS to the clone.
#
export TESTFS=$PERFPOOL/$TESTCLONE
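# Illustrative sketch: create_snapshot and create_clone come from the test
# library; the underlying ZFS operations they are expected to perform are the
# standard ones below (shown only for clarity, not taken from this diff):
#
#   zfs snapshot "$TESTFS@$TESTSNAP"                      # freeze the cached data
#   zfs clone "$TESTFS@$TESTSNAP" "$PERFPOOL/$TESTCLONE"  # new dataset sharing blocks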
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
typeset perf_record_cmd="perf record -F 99 -a -g -q \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
"vmstat -t 1" "vmstat"
"mpstat -P ALL 1" "mpstat"
"iostat -tdxyz 1" "iostat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
)
fi
-log_note "Sequential cached reads from $DIRECTORY with $PERF_RUNTYPE settings"
+log_note "Sequential cached reads from $DIRECTORY with " \
+ "ettings: $(print_perf_settings)"
do_fio_run sequential_reads.fio false false
log_pass "Measure IO stats during sequential cached read load"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
index 888136fec93c..adacdc29799c 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
@@ -1,112 +1,94 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2016, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2016, 2021 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the sequential_reads job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# The files to read from are created prior to the first fio run, and used
# for all fio runs. The ARC is not cleared to ensure that all data is cached.
#
# This is basically a copy of the sequential_reads_cached test case, but with
# a smaller dataset so that we can fit everything into the decompressed, linear
# space in the dbuf cache.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
recreate_perf_pool
}
trap "log_fail \"Measure IO stats during sequential read load\"" SIGTERM
log_onexit cleanup
recreate_perf_pool
populate_perf_filesystems
# Ensure the working set can be cached in the dbuf cache.
-export TOTAL_SIZE=$(($(get_max_dbuf_cache_size) * 3 / 4))
+export TOTAL_SIZE=$(($(get_dbuf_cache_size) * 3 / 4))
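# Worked example (illustrative): if get_dbuf_cache_size were to report a
# hypothetical 1 GiB dbuf cache, the line above sizes the working set at
# 1 GiB * 3 / 4 = 768 MiB, leaving headroom so the decompressed, linear
# buffers described in the header comment still fit entirely in the cache.
#
#   echo $((1024 * 1024 * 1024 * 3 / 4))   # 805306368 bytes, i.e. 768 MiB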
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'64'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
# Layout the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
typeset perf_record_cmd="perf record -F 99 -a -g -q \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
"vmstat -t 1" "vmstat"
"mpstat -P ALL 1" "mpstat"
"iostat -tdxyz 1" "iostat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"kstat zfs:0 1" "kstat"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
"dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
"dtrace -s $PERF_SCRIPTS/profile.d" "profile"
)
fi
-log_note "Sequential cached reads with $PERF_RUNTYPE settings"
+log_note "Sequential cached reads with settings: $(print_perf_settings)"
do_fio_run sequential_reads.fio false false
log_pass "Measure IO stats during sequential cached read load"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
index b4f466c4f65c..d32690a0542e 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
@@ -1,105 +1,87 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
#
#
# Description:
# Trigger fio runs using the sequential_writes job file. The number of runs and
# data collected is determined by the PERF_* variables. See do_fio_run for
# details about these variables.
#
# Prior to each fio run the dataset is recreated, and fio writes new files
# into an otherwise empty pool.
#
# Thread/Concurrency settings:
# PERF_NTHREADS defines the number of files created in the test filesystem,
# as well as the number of threads that will simultaneously drive IO to
# those files. The settings chosen are from measurements in the
# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
# are at peak throughput but lowest latency. Higher concurrency introduces
# queue time latency and would reduce the impact of code-induced performance
# regressions.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
function cleanup
{
# kill fio and iostat
pkill fio
pkill iostat
recreate_perf_pool
}
trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
log_onexit cleanup
recreate_perf_pool
populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 256k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
- export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
- export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
- export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
- export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
- export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
- export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
- export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
- export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
if is_linux; then
typeset perf_record_cmd="perf record -F 99 -a -g -q \
-o /dev/stdout -- sleep ${PERF_RUNTIME}"
export collect_scripts=(
"zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
"vmstat -t 1" "vmstat"
"mpstat -P ALL 1" "mpstat"
"iostat -tdxyz 1" "iostat"
"$perf_record_cmd" "perf"
)
else
export collect_scripts=(
"$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
"vmstat -T d 1" "vmstat"
"mpstat -T d 1" "mpstat"
"iostat -T d -xcnz 1" "iostat"
)
fi
-log_note "Sequential writes with $PERF_RUNTYPE settings"
+log_note "Sequential writes with settings: $(print_perf_settings)"
do_fio_run sequential_writes.fio true false
log_pass "Measure IO stats during sequential write load"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/setup.ksh
index 1544f637d8d9..68be00d4a63c 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/perf/regression/setup.ksh
@@ -1,23 +1,22 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
-# Copyright (c) 2015 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
verify_runnable "global"
-verify_disk_count "$DISKS" 3
log_pass
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index f3b53d243ae3..9542f241179f 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -1,843 +1,849 @@
/*
* $FreeBSD$
*/
/* zfs_config.h. Generated from zfs_config.h.in by configure. */
/* zfs_config.h.in. Generated from configure.ac by autoheader. */
/* Define to 1 if translation of program messages to the user's native
language is requested. */
/* #undef ENABLE_NLS */
/* bio_end_io_t wants 1 arg */
/* #undef HAVE_1ARG_BIO_END_IO_T */
/* lookup_bdev() wants 1 arg */
/* #undef HAVE_1ARG_LOOKUP_BDEV */
/* submit_bio() wants 1 arg */
/* #undef HAVE_1ARG_SUBMIT_BIO */
/* bdi_setup_and_register() wants 2 args */
/* #undef HAVE_2ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 2 args */
/* #undef HAVE_2ARGS_VFS_GETATTR */
/* zlib_deflate_workspacesize() wants 2 args */
/* #undef HAVE_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE */
/* bdi_setup_and_register() wants 3 args */
/* #undef HAVE_3ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 3 args */
/* #undef HAVE_3ARGS_VFS_GETATTR */
/* vfs_getattr wants 4 args */
/* #undef HAVE_4ARGS_VFS_GETATTR */
/* kernel has access_ok with 'type' parameter */
/* #undef HAVE_ACCESS_OK_TYPE */
/* posix_acl has refcount_t */
/* #undef HAVE_ACL_REFCOUNT */
/* Define if host toolchain supports AES */
#define HAVE_AES 1
#ifdef __amd64__
#ifndef RESCUE
/* Define if host toolchain supports AVX */
#define HAVE_AVX 1
#endif
/* Define if host toolchain supports AVX2 */
#define HAVE_AVX2 1
/* Define if host toolchain supports AVX512BW */
#define HAVE_AVX512BW 1
/* Define if host toolchain supports AVX512CD */
#define HAVE_AVX512CD 1
/* Define if host toolchain supports AVX512DQ */
#define HAVE_AVX512DQ 1
/* Define if host toolchain supports AVX512ER */
#define HAVE_AVX512ER 1
/* Define if host toolchain supports AVX512F */
#define HAVE_AVX512F 1
/* Define if host toolchain supports AVX512IFMA */
#define HAVE_AVX512IFMA 1
/* Define if host toolchain supports AVX512PF */
#define HAVE_AVX512PF 1
/* Define if host toolchain supports AVX512VBMI */
#define HAVE_AVX512VBMI 1
/* Define if host toolchain supports AVX512VL */
#define HAVE_AVX512VL 1
#endif
/* bdev_check_media_change() exists */
/* #undef HAVE_BDEV_CHECK_MEDIA_CHANGE */
/* bdev_whole() is available */
/* #undef HAVE_BDEV_WHOLE */
/* bio->bi_bdev->bd_disk exists */
/* #undef HAVE_BIO_BDEV_DISK */
/* bio->bi_opf is defined */
/* #undef HAVE_BIO_BI_OPF */
/* bio->bi_status exists */
/* #undef HAVE_BIO_BI_STATUS */
/* bio has bi_iter */
/* #undef HAVE_BIO_BVEC_ITER */
/* bio_*_io_acct() available */
/* #undef HAVE_BIO_IO_ACCT */
/* bio_max_segs() is implemented */
/* #undef HAVE_BIO_MAX_SEGS */
/* bio_set_dev() is available */
/* #undef HAVE_BIO_SET_DEV */
/* bio_set_dev() GPL-only */
/* #undef HAVE_BIO_SET_DEV_GPL_ONLY */
/* bio_set_op_attrs is available */
/* #undef HAVE_BIO_SET_OP_ATTRS */
/* blkdev_reread_part() exists */
/* #undef HAVE_BLKDEV_REREAD_PART */
/* blkg_tryget() is available */
/* #undef HAVE_BLKG_TRYGET */
/* blkg_tryget() GPL-only */
/* #undef HAVE_BLKG_TRYGET_GPL_ONLY */
+/* blk_alloc_disk() exists */
+/* #undef HAVE_BLK_ALLOC_DISK */
+
/* blk_alloc_queue() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN */
/* blk_alloc_queue_rh() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH */
/* blk queue backing_dev_info is dynamic */
/* #undef HAVE_BLK_QUEUE_BDI_DYNAMIC */
/* blk_queue_flag_clear() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_CLEAR */
/* blk_queue_flag_set() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_SET */
/* blk_queue_flush() is available */
/* #undef HAVE_BLK_QUEUE_FLUSH */
/* blk_queue_flush() is GPL-only */
/* #undef HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
/* blk_queue_secdiscard() is available */
/* #undef HAVE_BLK_QUEUE_SECDISCARD */
/* blk_queue_secure_erase() is available */
/* #undef HAVE_BLK_QUEUE_SECURE_ERASE */
/* blk_queue_write_cache() exists */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE */
/* blk_queue_write_cache() is GPL-only */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY */
/* Define if revalidate_disk() in block_device_operations */
/* #undef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK */
/* Define to 1 if you have the Mac OS X function CFLocaleCopyCurrent in the
CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYCURRENT */
/* Define to 1 if you have the Mac OS X function
CFLocaleCopyPreferredLanguages in the CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES */
/* Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in
the CoreFoundation framework. */
/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */
/* check_disk_change() exists */
/* #undef HAVE_CHECK_DISK_CHANGE */
/* clear_inode() is available */
/* #undef HAVE_CLEAR_INODE */
/* dentry uses const struct dentry_operations */
/* #undef HAVE_CONST_DENTRY_OPERATIONS */
/* copy_from_iter() is available */
/* #undef HAVE_COPY_FROM_ITER */
/* copy_to_iter() is available */
/* #undef HAVE_COPY_TO_ITER */
/* yes */
/* #undef HAVE_CPU_HOTPLUG */
/* current_time() exists */
/* #undef HAVE_CURRENT_TIME */
/* Define if the GNU dcgettext() function is already present or preinstalled.
*/
/* #undef HAVE_DCGETTEXT */
/* DECLARE_EVENT_CLASS() is available */
/* #undef HAVE_DECLARE_EVENT_CLASS */
/* lookup_bdev() wants dev_t arg */
/* #undef HAVE_DEVT_LOOKUP_BDEV */
/* sops->dirty_inode() wants flags */
/* #undef HAVE_DIRTY_INODE_WITH_FLAGS */
/* disk_*_io_acct() available */
/* #undef HAVE_DISK_IO_ACCT */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* d_make_root() is available */
/* #undef HAVE_D_MAKE_ROOT */
/* d_prune_aliases() is available */
/* #undef HAVE_D_PRUNE_ALIASES */
/* dops->d_revalidate() operation takes nameidata */
/* #undef HAVE_D_REVALIDATE_NAMEIDATA */
/* eops->encode_fh() wants child and parent inodes */
/* #undef HAVE_ENCODE_FH_WITH_INODE */
/* sops->evict_inode() exists */
/* #undef HAVE_EVICT_INODE */
/* fops->aio_fsync() exists */
/* #undef HAVE_FILE_AIO_FSYNC */
/* file_dentry() is available */
/* #undef HAVE_FILE_DENTRY */
/* file_inode() is available */
/* #undef HAVE_FILE_INODE */
/* iops->follow_link() cookie */
/* #undef HAVE_FOLLOW_LINK_COOKIE */
/* iops->follow_link() nameidata */
/* #undef HAVE_FOLLOW_LINK_NAMEIDATA */
/* fops->fsync() with range */
/* #undef HAVE_FSYNC_RANGE */
/* fops->fsync() without dentry */
/* #undef HAVE_FSYNC_WITHOUT_DENTRY */
/* generic_fillattr requires struct user_namespace* */
/* #undef HAVE_GENERIC_FILLATTR_USERNS */
/* generic_*_io_acct() 3 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_3ARG */
/* generic_*_io_acct() 4 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_4ARG */
/* generic_readlink is global */
/* #undef HAVE_GENERIC_READLINK */
/* generic_setxattr() exists */
/* #undef HAVE_GENERIC_SETXATTR */
/* generic_write_checks() takes kiocb */
/* #undef HAVE_GENERIC_WRITE_CHECKS_KIOCB */
/* Define if the GNU gettext() function is already present or preinstalled. */
/* #undef HAVE_GETTEXT */
/* iops->get_link() cookie */
/* #undef HAVE_GET_LINK_COOKIE */
/* iops->get_link() delayed */
/* #undef HAVE_GET_LINK_DELAYED */
/* group_info->gid exists */
/* #undef HAVE_GROUP_INFO_GID */
/* has_capability() is available */
/* #undef HAVE_HAS_CAPABILITY */
/* Define if you have the iconv() function and it works. */
#define HAVE_ICONV 1
/* yes */
/* #undef HAVE_INODE_LOCK_SHARED */
/* inode_owner_or_capable() exists */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE */
/* inode_owner_or_capable() takes user_ns */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_IDMAPPED */
/* inode_set_flags() exists */
/* #undef HAVE_INODE_SET_FLAGS */
/* inode_set_iversion() exists */
/* #undef HAVE_INODE_SET_IVERSION */
/* inode->i_*time's are timespec64 */
/* #undef HAVE_INODE_TIMESPEC64_TIMES */
/* timestamp_truncate() exists */
/* #undef HAVE_INODE_TIMESTAMP_TRUNCATE */
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* in_compat_syscall() is available */
/* #undef HAVE_IN_COMPAT_SYSCALL */
/* iops->create() takes struct user_namespace* */
/* #undef HAVE_IOPS_CREATE_USERNS */
/* iops->mkdir() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKDIR_USERNS */
/* iops->mknod() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKNOD_USERNS */
/* iops->rename() takes struct user_namespace* */
/* #undef HAVE_IOPS_RENAME_USERNS */
/* iops->symlink() takes struct user_namespace* */
/* #undef HAVE_IOPS_SYMLINK_USERNS */
/* iov_iter_advance() is available */
/* #undef HAVE_IOV_ITER_ADVANCE */
/* iov_iter_count() is available */
/* #undef HAVE_IOV_ITER_COUNT */
/* iov_iter_fault_in_readable() is available */
/* #undef HAVE_IOV_ITER_FAULT_IN_READABLE */
/* iov_iter_revert() is available */
/* #undef HAVE_IOV_ITER_REVERT */
/* iov_iter types are available */
/* #undef HAVE_IOV_ITER_TYPES */
/* yes */
/* #undef HAVE_IO_SCHEDULE_TIMEOUT */
/* Define to 1 if you have the `issetugid' function. */
#define HAVE_ISSETUGID 1
/* kernel has kernel_fpu_* functions */
/* #undef HAVE_KERNEL_FPU */
/* kernel has asm/fpu/api.h */
/* #undef HAVE_KERNEL_FPU_API_HEADER */
/* kernel fpu internal */
/* #undef HAVE_KERNEL_FPU_INTERNAL */
/* uncached_acl_sentinel() exists */
/* #undef HAVE_KERNEL_GET_ACL_HANDLE_CACHE */
/* kernel does stack verification */
/* #undef HAVE_KERNEL_OBJTOOL */
/* kernel has linux/objtool.h */
/* #undef HAVE_KERNEL_OBJTOOL_HEADER */
/* kernel_read() take loff_t pointer */
/* #undef HAVE_KERNEL_READ_PPOS */
/* timer_list.function gets a timer_list */
/* #undef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST */
/* struct timer_list has a flags member */
/* #undef HAVE_KERNEL_TIMER_LIST_FLAGS */
/* timer_setup() is available */
/* #undef HAVE_KERNEL_TIMER_SETUP */
/* kernel_write() take loff_t pointer */
/* #undef HAVE_KERNEL_WRITE_PPOS */
/* kmem_cache_create_usercopy() exists */
/* #undef HAVE_KMEM_CACHE_CREATE_USERCOPY */
/* kstrtoul() exists */
/* #undef HAVE_KSTRTOUL */
/* ktime_get_coarse_real_ts64() exists */
/* #undef HAVE_KTIME_GET_COARSE_REAL_TS64 */
/* ktime_get_raw_ts64() exists */
/* #undef HAVE_KTIME_GET_RAW_TS64 */
/* kvmalloc exists */
/* #undef HAVE_KVMALLOC */
/* Define if you have [aio] */
/* #undef HAVE_LIBAIO */
/* Define if you have [blkid] */
/* #undef HAVE_LIBBLKID */
/* Define if you have [crypto] */
#define HAVE_LIBCRYPTO 1
/* Define if you have [tirpc] */
/* #undef HAVE_LIBTIRPC */
/* Define if you have [udev] */
/* #undef HAVE_LIBUDEV */
/* Define if you have [uuid] */
/* #undef HAVE_LIBUUID */
/* lseek_execute() is available */
/* #undef HAVE_LSEEK_EXECUTE */
/* makedev() is declared in sys/mkdev.h */
/* #undef HAVE_MAKEDEV_IN_MKDEV */
/* makedev() is declared in sys/sysmacros.h */
/* #undef HAVE_MAKEDEV_IN_SYSMACROS */
/* Noting that make_request_fn() returns blk_qc_t */
/* #undef HAVE_MAKE_REQUEST_FN_RET_QC */
/* Noting that make_request_fn() returns void */
/* #undef HAVE_MAKE_REQUEST_FN_RET_VOID */
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* iops->mkdir() takes umode_t */
/* #undef HAVE_MKDIR_UMODE_T */
/* Define to 1 if you have the `mlockall' function. */
#define HAVE_MLOCKALL 1
/* lookup_bdev() wants mode arg */
/* #undef HAVE_MODE_LOOKUP_BDEV */
/* Define if host toolchain supports MOVBE */
#define HAVE_MOVBE 1
/* new_sync_read()/new_sync_write() are available */
/* #undef HAVE_NEW_SYNC_READ */
/* iops->getattr() takes a path */
/* #undef HAVE_PATH_IOPS_GETATTR */
/* Define if host toolchain supports PCLMULQDQ */
#define HAVE_PCLMULQDQ 1
/* percpu_counter_add_batch() is defined */
/* #undef HAVE_PERCPU_COUNTER_ADD_BATCH */
/* percpu_counter_init() wants gfp_t */
/* #undef HAVE_PERCPU_COUNTER_INIT_WITH_GFP */
/* posix_acl_chmod() exists */
/* #undef HAVE_POSIX_ACL_CHMOD */
/* posix_acl_from_xattr() needs user_ns */
/* #undef HAVE_POSIX_ACL_FROM_XATTR_USERNS */
/* posix_acl_release() is available */
/* #undef HAVE_POSIX_ACL_RELEASE */
/* posix_acl_release() is GPL-only */
/* #undef HAVE_POSIX_ACL_RELEASE_GPL_ONLY */
/* posix_acl_valid() wants user namespace */
/* #undef HAVE_POSIX_ACL_VALID_WITH_NS */
/* proc_ops structure exists */
/* #undef HAVE_PROC_OPS_STRUCT */
/* iops->put_link() cookie */
/* #undef HAVE_PUT_LINK_COOKIE */
/* iops->put_link() delayed */
/* #undef HAVE_PUT_LINK_DELAYED */
/* iops->put_link() nameidata */
/* #undef HAVE_PUT_LINK_NAMEIDATA */
/* If available, contains the Python version number currently in use. */
#define HAVE_PYTHON "3.7"
/* qat is enabled and existed */
/* #undef HAVE_QAT */
/* iops->rename() wants flags */
/* #undef HAVE_RENAME_WANTS_FLAGS */
/* REQ_DISCARD is defined */
/* #undef HAVE_REQ_DISCARD */
/* REQ_FLUSH is defined */
/* #undef HAVE_REQ_FLUSH */
/* REQ_OP_DISCARD is defined */
/* #undef HAVE_REQ_OP_DISCARD */
/* REQ_OP_FLUSH is defined */
/* #undef HAVE_REQ_OP_FLUSH */
/* REQ_OP_SECURE_ERASE is defined */
/* #undef HAVE_REQ_OP_SECURE_ERASE */
/* REQ_PREFLUSH is defined */
/* #undef HAVE_REQ_PREFLUSH */
/* revalidate_disk() is available */
/* #undef HAVE_REVALIDATE_DISK */
/* revalidate_disk_size() is available */
/* #undef HAVE_REVALIDATE_DISK_SIZE */
/* struct rw_semaphore has member activity */
/* #undef HAVE_RWSEM_ACTIVITY */
/* struct rw_semaphore has atomic_long_t member count */
/* #undef HAVE_RWSEM_ATOMIC_LONG_COUNT */
/* linux/sched/signal.h exists */
/* #undef HAVE_SCHED_SIGNAL_HEADER */
/* Define to 1 if you have the <security/pam_modules.h> header file. */
#define HAVE_SECURITY_PAM_MODULES_H 1
/* setattr_prepare() is available, doesn't accept user_namespace */
/* #undef HAVE_SETATTR_PREPARE_NO_USERNS */
/* setattr_prepare() accepts user_namespace */
/* #undef HAVE_SETATTR_PREPARE_USERNS */
/* iops->set_acl() exists, takes 3 args */
/* #undef HAVE_SET_ACL */
/* iops->set_acl() takes 4 args */
/* #undef HAVE_SET_ACL_USERNS */
/* set_cached_acl() is usable */
/* #undef HAVE_SET_CACHED_ACL_USABLE */
/* set_special_state() exists */
/* #undef HAVE_SET_SPECIAL_STATE */
/* struct shrink_control exists */
/* #undef HAVE_SHRINK_CONTROL_STRUCT */
/* kernel_siginfo_t exists */
/* #undef HAVE_SIGINFO */
/* signal_stop() exists */
/* #undef HAVE_SIGNAL_STOP */
/* new shrinker callback wants 2 args */
/* #undef HAVE_SINGLE_SHRINKER_CALLBACK */
/* ->count_objects exists */
/* #undef HAVE_SPLIT_SHRINKER_CALLBACK */
#if defined(__amd64__) || defined(__i386__)
/* Define if host toolchain supports SSE */
#define HAVE_SSE 1
/* Define if host toolchain supports SSE2 */
#define HAVE_SSE2 1
/* Define if host toolchain supports SSE3 */
#define HAVE_SSE3 1
/* Define if host toolchain supports SSE4.1 */
#define HAVE_SSE4_1 1
/* Define if host toolchain supports SSE4.2 */
#define HAVE_SSE4_2 1
/* Define if host toolchain supports SSSE3 */
#define HAVE_SSSE3 1
#endif
/* STACK_FRAME_NON_STANDARD is defined */
/* #undef HAVE_STACK_FRAME_NON_STANDARD */
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcat' function. */
#define HAVE_STRLCAT 1
/* Define to 1 if you have the `strlcpy' function. */
#define HAVE_STRLCPY 1
/* submit_bio is member of struct block_device_operations */
/* #undef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
/* super_setup_bdi_name() exists */
/* #undef HAVE_SUPER_SETUP_BDI_NAME */
/* super_block->s_user_ns exists */
/* #undef HAVE_SUPER_USER_NS */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* i_op->tmpfile() exists */
/* #undef HAVE_TMPFILE */
/* i_op->tmpfile() has userns */
/* #undef HAVE_TMPFILE_USERNS */
/* totalhigh_pages() exists */
/* #undef HAVE_TOTALHIGH_PAGES */
/* kernel has totalram_pages() */
/* #undef HAVE_TOTALRAM_PAGES_FUNC */
/* Define to 1 if you have the `udev_device_get_is_initialized' function. */
/* #undef HAVE_UDEV_DEVICE_GET_IS_INITIALIZED */
/* kernel has __kernel_fpu_* functions */
/* #undef HAVE_UNDERSCORE_KERNEL_FPU */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* iops->getattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_GETATTR */
/* iops->getattr() takes a vfsmount */
/* #undef HAVE_VFSMOUNT_IOPS_GETATTR */
/* aops->direct_IO() uses iovec */
/* #undef HAVE_VFS_DIRECT_IO_IOVEC */
/* aops->direct_IO() uses iov_iter without rw */
/* #undef HAVE_VFS_DIRECT_IO_ITER */
/* aops->direct_IO() uses iov_iter with offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_OFFSET */
/* aops->direct_IO() uses iov_iter with rw and offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET */
/* All required iov_iter interfaces are available */
/* #undef HAVE_VFS_IOV_ITER */
/* fops->iterate() is available */
/* #undef HAVE_VFS_ITERATE */
/* fops->iterate_shared() is available */
/* #undef HAVE_VFS_ITERATE_SHARED */
/* fops->readdir() is available */
/* #undef HAVE_VFS_READDIR */
/* fops->read/write_iter() are available */
/* #undef HAVE_VFS_RW_ITERATE */
+/* __set_page_dirty_nobuffers exists */
+/* #undef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS */
+
/* __vmalloc page flags exists */
/* #undef HAVE_VMALLOC_PAGE_KERNEL */
/* yes */
/* #undef HAVE_WAIT_ON_BIT_ACTION */
/* wait_queue_entry_t exists */
/* #undef HAVE_WAIT_QUEUE_ENTRY_T */
/* wq_head->head and wq_entry->entry exist */
/* #undef HAVE_WAIT_QUEUE_HEAD_ENTRY */
/* xattr_handler->get() wants dentry */
/* #undef HAVE_XATTR_GET_DENTRY */
/* xattr_handler->get() wants both dentry and inode */
/* #undef HAVE_XATTR_GET_DENTRY_INODE */
/* xattr_handler->get() wants xattr_handler */
/* #undef HAVE_XATTR_GET_HANDLER */
/* xattr_handler has name */
/* #undef HAVE_XATTR_HANDLER_NAME */
/* xattr_handler->list() wants dentry */
/* #undef HAVE_XATTR_LIST_DENTRY */
/* xattr_handler->list() wants xattr_handler */
/* #undef HAVE_XATTR_LIST_HANDLER */
/* xattr_handler->list() wants simple */
/* #undef HAVE_XATTR_LIST_SIMPLE */
/* xattr_handler->set() wants dentry */
/* #undef HAVE_XATTR_SET_DENTRY */
/* xattr_handler->set() wants both dentry and inode */
/* #undef HAVE_XATTR_SET_DENTRY_INODE */
/* xattr_handler->set() wants xattr_handler */
/* #undef HAVE_XATTR_SET_HANDLER */
/* xattr_handler->set() takes user_namespace */
/* #undef HAVE_XATTR_SET_USERNS */
/* Define if you have [z] */
#define HAVE_ZLIB 1
/* __posix_acl_chmod() exists */
/* #undef HAVE___POSIX_ACL_CHMOD */
/* kernel exports FPU functions */
/* #undef KERNEL_EXPORTS_X86_FPU */
/* TBD: fetch(3) support */
#if 0
/* whether the chosen libfetch is to be loaded at run-time */
#define LIBFETCH_DYNAMIC 1
/* libfetch is fetch(3) */
#define LIBFETCH_IS_FETCH 1
/* libfetch is libcurl */
#define LIBFETCH_IS_LIBCURL 0
/* soname of chosen libfetch */
#define LIBFETCH_SONAME "libfetch.so.6"
#endif
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* make_request_fn() return type */
/* #undef MAKE_REQUEST_FN_RET */
/* hardened module_param_call */
/* #undef MODULE_PARAM_CALL_CONST */
/* struct shrink_control has nid */
/* #undef SHRINK_CONTROL_HAS_NID */
/* Defined for legacy compatibility. */
#define SPL_META_ALIAS ZFS_META_ALIAS
/* Defined for legacy compatibility. */
#define SPL_META_RELEASE ZFS_META_RELEASE
/* Defined for legacy compatibility. */
#define SPL_META_VERSION ZFS_META_VERSION
/* True if ZFS is to be compiled for a FreeBSD system */
#define SYSTEM_FREEBSD 1
/* True if ZFS is to be compiled for a Linux system */
/* #undef SYSTEM_LINUX */
/* zfs debugging enabled */
/* #undef ZFS_DEBUG */
/* /dev/zfs minor */
/* #undef ZFS_DEVICE_MINOR */
/* enum node_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_FILE_PAGES */
/* enum node_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum node_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_FILE */
/* enum zone_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_FILE_PAGES */
/* enum zone_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum zone_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_FILE */
/* global_node_page_state() exists */
/* #undef ZFS_GLOBAL_NODE_PAGE_STATE */
/* global_zone_page_state() exists */
/* #undef ZFS_GLOBAL_ZONE_PAGE_STATE */
/* Define to 1 if GPL-only symbols can be used */
/* #undef ZFS_IS_GPL_COMPATIBLE */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.1.99-FreeBSD_g14b43fbd9"
+#define ZFS_META_ALIAS "zfs-2.1.99-FreeBSD_gf3678d70f"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
/* Define the project release date. */
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
#define ZFS_META_KVER_MAX "5.13"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "3.10"
/* Define the project license. */
#define ZFS_META_LICENSE "CDDL"
/* Define the libtool library 'age' version information. */
/* #undef ZFS_META_LT_AGE */
/* Define the libtool library 'current' version information. */
/* #undef ZFS_META_LT_CURRENT */
/* Define the libtool library 'revision' version information. */
/* #undef ZFS_META_LT_REVISION */
/* Define the project name. */
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "FreeBSD_g14b43fbd9"
+#define ZFS_META_RELEASE "FreeBSD_gf3678d70f"
/* Define the project version. */
#define ZFS_META_VERSION "2.1.99"
/* count is located in percpu_ref.data */
/* #undef ZFS_PERCPU_REF_COUNT_IN_DATA */
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index 52424eeffc9b..fc9656910a1f 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1,5 +1,5 @@
/*
* $FreeBSD$
*/
-#define ZFS_META_GITREV "zfs-2.1.99-375-g14b43fbd9"
+#define ZFS_META_GITREV "zfs-2.1.99-404-gf3678d70f"
